/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"

#define CDU_VALIDATION_DEFAULT_CFG	61

static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
	{ 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */
	{ 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */
	{ 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */
};
static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
	{ 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
				QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
				  0)
#define QM_INVALID_PQ_ID		0xffff

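/* Illustrative example of the size encoding above: QM_PQ_SIZE_256B(512) =
 * DIV_ROUND_UP(512, 0x100) - 1 = 1, i.e. the PQ size register holds
 * (number of 256B units) - 1, and a size of 0 encodes as 0.
 */
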
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF		4

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND		62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT		5

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)

/* 0.7 * upper bound (62500000) */
#define QM_WFQ_MAX_INC_VAL		43750000

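/* Example (illustrative): QM_WFQ_INC_VAL(100) = 100 * 0x9000 = 3686400,
 * which is below QM_WFQ_MAX_INC_VAL, so a WFQ weight of 100 passes the
 * validation in ecore_pf_wfq_rt_init().
 */
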
/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_RL_UPPER_BOUND		62500000

/* Period in us */
#define QM_RL_PERIOD			5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)

/* 0.7 * upper bound (62500000) */
#define QM_RL_MAX_INC_VAL		43750000

/* RL increment value - rate is specified in mbps. the factor of 1.01 was
 * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
 * although the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
 * this point.
 */
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
				QM_RL_PERIOD * 101) / (8 * 100)), 1)

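/* Worked example (illustrative): for rate = 25000 (25Gbps),
 * QM_RL_INC_VAL(25000) = max((25000 * 5 * 101) / 800, 1) = 15781 credits
 * added per 5us RL period.
 */
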
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES		150

#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
	 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
	  PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
	 (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
	  PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

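/* Example (illustrative): QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) = 292 with
 * QM_LINE_CRD_REG_SIGN_BIT set, i.e. a negative initial line credit in the
 * register's signed representation.
 */
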
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS		38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR		10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO		7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

/* VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
	((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port)			(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
	 LB_VOQ(port))

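/* Example (illustrative): with max_phys_tcs_per_port = 4, traffic class 2 on
 * port 1 maps to physical VOQ 1 * 4 + 2 = 6, while the pure LB TC of port 1
 * maps to VOQ MAX_PHYS_VOQS + 1.
 */
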
/******************** INTERNAL IMPLEMENTATION *********************/

/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);

		/* Write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;

	/* Clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find #lines to divide between the active physical TCs */
		phys_lines = port_params[port_id].num_pbf_cmd_lines -
			     PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1) {
				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
							     phys_lines_per_tc);
			}
		}

		/* Init registers for pure LB TC */
		ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
					     PBF_CMDQ_PURE_LB_LINES);
	}
}

/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
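/* Worked example (illustrative, following the integer arithmetic in the
 * function below): B = 1000 blocks, C = 4 TCs: usable = 1000 - 38 = 962;
 * pure LB = MAX(38, (962 * 10 / 47) / 10) = MAX(38, 20) = 38; each physical
 * TC is then guaranteed (962 - 38) / 4 = 231 blocks.
 */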
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. use factor to avoid floating
		 * point arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				  (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				   BTB_PURE_LB_RATIO);
		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
					    pure_lb_blocks /
					    BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1) {
				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET(voq),
					     phys_blocks);
			}
		}

		/* Init pure LB TC */
		STORE_RT_REG(p_hwfn,
			     PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
			     pure_lb_blocks);
	}
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id, u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    u32 num_pf_cids, u32 num_vf_cids,
				    u16 start_pq, u16 num_pf_pqs,
				    u16 num_vf_pqs, u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = num_pf_pqs + num_vf_pqs;

	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)pf_id);

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		struct qm_rf_pq_map tx_pq_map;
		bool is_vf_pq, rl_valid;
		u8 voq, vport_id_in_pf;
		u16 first_tx_pq_id;

		voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		is_vf_pq = (i >= num_pf_pqs);
		rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
			   max_qm_global_rls;

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - start_vport;
		first_tx_pq_id =
		vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* Create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}

		/* Check RL ID */
		if (pq_params[i].rl_valid && pq_params[i].vport_id >=
		    max_qm_global_rls)
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT ID for rate limiter config\n");

		/* Fill PQ map entry */
		OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  rl_valid ? pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  pq_params[i].wrr_group);

		/* Write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));

		/* Set base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
				(1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
				     i, tx_pq_vf_mask[i]);
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 pf_id,
				       u32 num_pf_cids,
				       u32 num_tids,
				       u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)pf_id);

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	/* Set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id, u8 pf_id, u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val, crd_reg_offset;
	u8 voq;
	u16 i;

	crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
			  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			 (pf_id % MAX_NUM_PFS_BB);

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}

static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}

	return true;
}

static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}


/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

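/* Example (illustrative, hypothetical page counts): if
 * QM_PQ_MEM_4KB(num_pf_cids) = 2, QM_PQ_MEM_4KB(num_vf_cids) = 1 and
 * QM_PQ_MEM_4KB(num_pf_cids + num_tids) = 3, a PF with 8 PF PQs and 4 VF PQs
 * needs 2 * 8 + 1 * 4 + 3 * 4 = 32 4KB pages.
 */
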
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	u32 mask;

	/* Init AFullOprtnstcCrdMask */
	mask = (QM_OPPOR_LINE_VOQ_DEF <<
		QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		(pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		(vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		(pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		(vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		(QM_OPPOR_FW_STOP_DEF <<
		 QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		(QM_OPPOR_PQ_EMPTY_DEF <<
		 QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);

	/* Enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

	/* Enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);

	/* Enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

	/* Init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	/* Init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	return 0;
}

int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port,
			u32 num_pf_cids, u32 num_vf_cids, u32 num_tids,
			u16 start_pq, u16 num_pf_pqs, u16 num_vf_pqs,
			u8 start_vport, u8 num_vports, u16 pf_wfq, u32 pf_rl,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u32 other_mem_size_4kb;
	u8 tc, i;

	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
			     QM_OTHER_PQS_PER_PF;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
#endif

	/* Map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
				max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
				start_pq, num_pf_pqs, num_vf_pqs, start_vport,
				other_mem_size_4kb, pq_params, vport_params);

	/* Init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init
		    (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
		     num_pf_pqs + num_vf_pqs, pq_params))
			return -1;

	/* Init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
		return -1;

	/* Set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, vport_params))
		return -1;

	return 0;
}

int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}

int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}

	return 0;
}

int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	inc_val = QM_RL_INC_VAL(vport_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}

bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		    (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}

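/* Example (illustrative): stopping 64 Tx PQs starting at PQ 32 issues two
 * stop commands, one for PQ-mask group 1 (PQs 32-63) and one for group 2
 * (PQs 64-95), since each command carries a 32-bit PQ pause mask.
 */
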
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET	4
#define NIG_LB_ETS_CLIENT_OFFSET	1
#define NIG_ETS_MIN_WFQ_BYTES		1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE		1

/* Period in us */
#define NIG_RL_PERIOD			1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M		(25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate)		(((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) \
	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

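/* Example (illustrative): for rate = 10000 (10Gbps), NIG_RL_INC_VAL(10000) =
 * 10000 * 1 / 8 = 1250, i.e. 1250 bytes of credit per 1us RL period.
 */
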
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS	4

void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
	u32 tc_bound_base_addr, tc_bound_addr_diff;
	u8 sp_tc_map = 0, wfq_tc_map = 0;
	u8 tc, num_tc, tc_client_offset;

	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
				   NIG_TX_ETS_CLIENT_OFFSET;
	min_weight = 0xffffffff;
	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
		 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
		 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));

	/* Write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
			 tc_weight_addr_diff * tc_client_offset, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
			 tc_bound_addr_diff * tc_client_offset,
			 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}

void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u32 ctrl, inc_val, reg_offset;
	u8 tc;

	/* Disable global MAC+LB RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |=
		    1 <<
		    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}

	/* Disable global LB-only RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |=
		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}

	/* Per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* Disable TC RL */
		ctrl =
		    NIG_RL_BASE_TYPE <<
		NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

		/* Configure and enable TC RL */
		if (!req->tc_rate[tc])
			continue;

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
			 reg_offset, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
			 reg_offset, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
			 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
			NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
			 reg_offset, ctrl);
	}
}

void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	u32 pri_tc_mask = 0;
	u8 pri, tc;

	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (!req->pri[pri].valid)
			continue;

		pri_tc_mask |= (req->pri[pri].tc_id <<
				(pri * NIG_PRIORITY_MAP_TC_BITS));
		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
	}

	/* Write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

	/* Write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}

/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES		1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))

void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

	tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
			      PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
			     PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);

	/* Write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
			 tc_weight_addr_diff, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
			 tc * tc_bound_addr_diff,
			 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}

/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB		4800
#define BRB_TOTAL_RAM_BLOCKS_K2		5632
#define BRB_BLOCK_SIZE			128
#define BRB_MIN_BLOCKS_PER_TC		9
#define BRB_HYST_BYTES			10240
#define BRB_HYST_BLOCKS			(BRB_HYST_BYTES / BRB_BLOCK_SIZE)

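/* i.e. BRB_HYST_BLOCKS = 10240 / 128 = 80 blocks of hysteresis */
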
/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
	u32 active_port_blocks, reg_offset = 0;
	u8 port, active_ports = 0;

	tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
					       BRB_BLOCK_SIZE);
	min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
						BRB_BLOCK_SIZE);
	total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
						    BRB_TOTAL_RAM_BLOCKS_BB;

	/* Find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;

	active_port_blocks = (u32)(total_blocks / active_ports);

	for (port = 0; port < req->max_ports_per_engine; port++) {
		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
		u32 tc_guaranteed_blocks;
		u8 tc;

		/* Calculate per-port sizes */
		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
							 BRB_BLOCK_SIZE);
		port_blocks = req->num_active_tcs[port] ? active_port_blocks :
							  0;
		port_guaranteed_blocks = req->num_active_tcs[port] *
					 tc_guaranteed_blocks;
		port_shared_blocks = port_blocks - port_guaranteed_blocks;
		full_xoff_th = req->num_active_tcs[port] *
			       BRB_MIN_BLOCKS_PER_TC;
		full_xon_th = full_xoff_th + min_pkt_size_blocks;
		pause_xoff_th = tc_headroom_blocks;
		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

		/* Init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);

		/* Init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);

		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* Clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}

			/* Init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);

			/* Init pause/full thresholds per physical TC - for
			 * loopback traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);

			/* Init pause/full thresholds per physical TC - for
			 * main traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}

/* In MF should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_FIC_FORMAT	-188897008

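/* Example (illustrative): SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 3, true) sets
 * bit 3 of reg_val, while enable = false clears that bit and leaves all
 * other bits untouched.
 */
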
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			   PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
			   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
				   NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
				   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}

void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		   eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		   ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
		   eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
		   ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}

void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		   eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		   ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	}

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
		 ip_geneve_enable ? 1 : 0);
}

#define T_ETH_PACKET_ACTION_GFT_EVENTID		23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR	272
#define T_ETH_PACKET_MATCH_RFS_EVENTID		25
#define PARSER_ETH_CONN_CM_HDR			0
#define CAM_LINE_SIZE				sizeof(u32)
#define RAM_LINE_SIZE				sizeof(u64)
#define REG_SIZE				sizeof(u32)

void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u16 pf_id)
{
	union gft_cam_line_union cam_line;
	struct gft_ram_line ram_line;
	u32 i, *ram_line_ptr;

	ram_line_ptr = (u32 *)&ram_line;

	/* Stop using gft logic, disable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);

	/* Clean ram & cam for next rfs/gft session */

	/* Zero camline */
	OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
		 cam_line.cam_line_mapped.camline);

	/* Zero ramline */
	OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));

	/* Each iteration write to reg */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
			 RAM_LINE_SIZE * pf_id +
			 i * REG_SIZE, *(ram_line_ptr + i));
}

void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 rfs_cm_hdr_event_id;

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}

void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u16 pf_id,
			       bool tcp, bool udp,
			       bool ipv4, bool ipv6)
{
	u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	union gft_cam_line_union camLine;
	struct gft_ram_line ramLine;
	u32 *ramLinePointer = (u32 *)&ramLine;
	u32 i;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true,
			  "set_rfs_mode_enable: must accept at "
			  "least one of - ipv4 or ipv6");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true,
			  "set_rfs_mode_enable: must accept at "
			  "least one of - udp or tcp");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);

	/* Configure Registers for RFS mode */

	/* Enable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
							     * context only cid
							     * in PRS on match
							     */
	camLine.cam_line_mapped.camline = 0;

	/* Cam line is now valid!! */
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(camLine.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(camLine.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
		 camLine.cam_line_mapped.camline);
	camLine.cam_line_mapped.camline =
	    ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ramLine.lo = 0;
	ramLine.hi = 0;
	SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
	SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
	SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);

	/* Each iteration write to reg */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
			 RAM_LINE_SIZE * pf_id +
			 i * REG_SIZE, *(ramLinePointer + i));

	/* Set default profile so that no filter match will happen */
	ramLine.lo = 0xffffffff;
	ramLine.hi = 0x3ff;
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
			 RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
			 i * REG_SIZE, *(ramLinePointer + i));
}

/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt, u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
			     msdm_vf_size_log);
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
			     msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}

/* Get mstorm statistics for offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
				       u16 stat_cnt_id,
				       u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
	    (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}

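/* Example (illustrative): with MSTORM_VF_ZONE_DEFAULT_SIZE_LOG = n and
 * double zone mode, stat counter MAX_NUM_PFS + 5 sits 5 * 2^n bytes past its
 * default-mode offset; in quad mode the extra stride is 3 * 2^n per counter.
 */
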
/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
					 u8 vf_id,
					 u8 vf_queue_id,
					 u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
	}

	return offset;
}

#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE		0xFF
#define CRC8_TABLE_SIZE		256
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type / region /
 * cid
 */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid;	/*automatically initialized to 0*/
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/*
	 * The CRC is calculated on the String-to-compress:
	 * [31:8]  = {CID[31:20],CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);

	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc,
			sizeof(data_to_crc), CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >>
			     CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}

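/* Illustrative example of the string-to-compress built above: for
 * cid = 0x00ABC123, region = 3 and conn_type = 1, with a config that uses
 * all three fields, validation_string = 0x00A00000 | (0x123 << 8) |
 * (3 << 4) | 1 = 0x00A12331, which is then byte-swapped and CRC8'd.
 */
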
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
				       u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
				    u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}

/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3 - bits [31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5 - bits [15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1 - bits [15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}