/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
	{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
	{ 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
				QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
				  0)
#define QM_INVALID_PQ_ID 0xffff

#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
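
/* Worked example (illustrative only, not used by the code): a WFQ weight
 * of 1 yields QM_WFQ_INC_VAL(1) = 0x9000 = 36864 byte credits, so the
 * largest weight that stays below QM_WFQ_MAX_INC_VAL
 * (0.7 * 62500000 = 43750000) is about 43750000 / 36864 ~= 1186.
 */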
/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16
/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in Mbps. The factor of 1.01 was
 * added after seeing only a 99% factor reached in a 25Gbps port with DPDK
 * RFC 2544 test. In this scenario the PF RL was reducing the line rate to
 * 99% although the credit increment value was the correct one and FW
 * calculated correct packet sizes. The reason for the inaccuracy of the RL
 * is unknown at this point.
 */
#define QM_RL_INC_VAL(rate) \
	OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
	(8 * 100)), 1)
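
/* Worked example (illustrative, assuming the 8 * 100 divisor restored
 * above): for a 25Gbps port, rate = 25000 Mbps gives
 * QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / 800 = 15781 bytes per 5us
 * period - the exact 5us line-rate budget of 15625 bytes plus the 1.01
 * compensation factor described above.
 */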
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) \
	((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
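
/* Worked example (illustrative): for a 10000 Mbps link,
 * QM_RL_INC_VAL(10000) = (10000 * 5 * 101) / 800 = 6312, which is below
 * 9700 + 1000, so QM_VP_RL_UPPER_BOUND(10000) = 10700 and the bypass
 * threshold above evaluates to 10699.
 */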
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
			  vp_pq_id, rl_id, ext_voq, wrr) \
	do { \
		OSAL_MEMSET(&map, 0, sizeof(map)); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
		SET_FIELD(map.reg, \
			  QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
			     *((u32 *)&map)); \
	} while (0)
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
	 ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
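
/* Worked example (illustrative): PQ_INFO_ELEMENT(5, 2, 1, 0, 1, 3) packs
 * vp = 5 into bits [11:0], pf = 2 into [15:12], tc = 1 into [19:16],
 * port = 0 into [21:20], rl_valid = 1 into bit 22 and rl = 3 into [31:24],
 * giving 0x03412005.
 */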
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
			    u8 port_id,
			    u8 tc,
			    u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
	else
		return port_id * (max_phys_tcs_per_port) + tc;
}
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
		if (num_ext_voqs >= 32)
			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
				     (u32)(voq_bit_mask >> 32));
#endif

		/* Write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_VP_RL_BYPASS_THRESH_SPEED);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 ext_voq,
					 u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs. In E5, 1/8 of the lines are reserved;
		 * the lines for the pure LB TC are subtracted.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
						    max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1)
				ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
							     phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
					    max_phys_tcs_per_port);
		ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					     PBF_CMDQ_PURE_LB_LINES);
	}
}
/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. use factor to avoid floating
		 * arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
					    pure_lb_blocks /
					    BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1) {
				ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
					phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
					    max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}
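
/* Worked example for the allocation above (illustrative): with B = 1000
 * usable blocks after the 38-block headroom and C = 4 active TCs, the pure
 * LB VOQ gets MAX(38, 1000 / 4.7) = 212 blocks and each physical TC gets
 * (1000 - 212) / 4 = 197 guaranteed blocks.
 */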
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_pf_loading,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = num_pf_pqs + num_vf_pqs;

	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		u8 ext_voq, vport_id_in_pf;
		bool is_vf_pq, rl_valid;
		u16 first_tx_pq_id;

		ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
					    pq_params[i].tc_id,
					    max_phys_tcs_per_port);
		is_vf_pq = (i >= num_pf_pqs);
		rl_valid = pq_params[i].rl_valid > 0;

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - start_vport;
		first_tx_pq_id =
		vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				      (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));

			/* Create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id, map_val);
		}

		/* Check RL ID */
		if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT ID for rate limiter config\n");
			rl_valid = false;
		}

		/* Prepare PQ map entry */
		struct qm_rf_pq_map_e4 tx_pq_map;

		QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
				  1 : 0,
				  first_tx_pq_id, rl_valid ?
				  pq_params[i].vport_id : 0,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
						  pq_params[i].tc_id,
						  pq_params[i].port_id,
						  rl_valid ? 1 : 0, rl_valid ?
						  pq_params[i].vport_id : 0);
			ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
				 pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
				(1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
				     i, tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 pf_id,
				       bool is_pf_loading,
				       u32 num_pf_cids,
				       u32 num_tids,
				       u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
					    pq_params[i].tc_id,
					    max_phys_tcs_per_port);
		crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
				  QM_REG_WFQPFCRD_RT_OFFSET :
				  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
				 ext_voq * MAX_NUM_PFS_BB +
				 (pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
		     pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  u32 link_speed,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	u32 inc_val;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
					vport_params[i].vport_rl : link_speed);
		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_VP_RL_UPPER_BOUND(link_speed) |
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}

	return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr,
			      u32 cmd_data_lsb,
			      u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids,
			 u16 num_pf_pqs,
			 u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
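
/* Worked example (illustrative, assuming a 4-byte QM_PQ_ELEMENT_SIZE):
 * with num_pf_cids = 64, QM_PQ_MEM_4KB(64) = DIV_ROUND_UP(65 * 4, 4096) = 1,
 * so num_pf_pqs = 8 Tx PQs plus QM_OTHER_PQS_PER_PF = 4 other PQs (and no
 * VF PQs or TIDs) need 8 * 1 + 4 * 1 = 12 4KB pages.
 */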
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	u32 mask;

	/* Init AFullOprtnstcCrdMask */
	mask = (QM_OPPOR_LINE_VOQ_DEF <<
		QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		(pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		(vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		(pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		(vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		(QM_OPPOR_FW_STOP_DEF <<
		 QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		(QM_OPPOR_PQ_EMPTY_DEF <<
		 QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);

	/* Enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

	/* Enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);

	/* Enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

	/* Init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	/* Init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			bool is_pf_loading,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			u32 link_speed,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u32 other_mem_size_4kb;
	u8 tc, i;

	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
			     QM_OTHER_PQS_PER_PF;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
				   num_tids, 0);
#endif

	/* Map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
				is_pf_loading, num_pf_cids, num_vf_cids,
				start_pq, num_pf_pqs, num_vf_pqs, start_vport,
				other_mem_size_4kb, pq_params, vport_params);

	/* Init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
					 max_phys_tcs_per_port,
					 num_pf_pqs + num_vf_pqs, pq_params))
			return -1;

	/* Init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
		return -1;

	/* Set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, link_speed, vport_params))
		return -1;

	return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}

	return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id,
			u32 vport_rl,
			u32 link_speed)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
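
/* Worked example (illustrative): stopping start_pq = 30, num_pqs = 4 spans
 * two 32-bit mask groups. Bits 30-31 are collected and flushed with
 * GROUP_ID 0 when pq_id % 32 == 31, then bits 0-1 are flushed with
 * GROUP_ID 1 when the last PQ (33) is reached.
 */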
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in Mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) \
	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
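
/* Worked example (illustrative): for a 10000 Mbps rate,
 * NIG_RL_INC_VAL(10000) = 10000 / 8 = 1250 bytes per 1us period (exactly
 * 10Gbps), and with a 9600B MTU the upper bound is
 * NIG_RL_MAX_VAL(1250, 9600) = 2 * 9600 = 19200.
 */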
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
	u32 tc_bound_base_addr, tc_bound_addr_diff;
	u8 sp_tc_map = 0, wfq_tc_map = 0;
	u8 tc, num_tc, tc_client_offset;

	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
				   NIG_TX_ETS_CLIENT_OFFSET;
	min_weight = 0xffffffff;
	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
			 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
			 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));

	/* Write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
			 tc_weight_addr_diff * tc_client_offset, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
			 tc_bound_addr_diff * tc_client_offset,
			 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u32 ctrl, inc_val, reg_offset;
	u8 tc;

	/* Disable global MAC+LB RL */
	ctrl = NIG_RL_BASE_TYPE <<
	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
		    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}

	/* Disable global LB-only RL */
	ctrl = NIG_RL_BASE_TYPE <<
	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |=
		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}

	/* Per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* Disable TC RL */
		ctrl = NIG_RL_BASE_TYPE <<
		NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

		/* Configure and enable TC RL */
		if (!req->tc_rate[tc])
			continue;

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
			 reg_offset, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
			 reg_offset, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
			 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
		    NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
			 reg_offset, ctrl);
	}
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	u32 pri_tc_mask = 0;
	u8 pri, tc;

	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (!req->pri[pri].valid)
			continue;

		pri_tc_mask |= (req->pri[pri].tc_id <<
				(pri * NIG_PRIORITY_MAP_TC_BITS));
		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
	}

	/* Write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

	/* Write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

	tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
			      PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
			     PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);

	/* Write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
			 tc_weight_addr_diff, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
			 tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
								   req->mtu));
	}
}
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
	u32 active_port_blocks, reg_offset = 0;
	u8 port, active_ports = 0;

	tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
					       BRB_BLOCK_SIZE);
	min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
						BRB_BLOCK_SIZE);
	total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
						    BRB_TOTAL_RAM_BLOCKS_BB;

	/* Find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;

	active_port_blocks = (u32)(total_blocks / active_ports);

	for (port = 0; port < req->max_ports_per_engine; port++) {
		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
		u32 tc_guaranteed_blocks;
		u8 tc;

		/* Calculate per-port sizes */
		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
							 BRB_BLOCK_SIZE);
		port_blocks = req->num_active_tcs[port] ? active_port_blocks :
							  0;
		port_guaranteed_blocks = req->num_active_tcs[port] *
					 tc_guaranteed_blocks;
		port_shared_blocks = port_blocks - port_guaranteed_blocks;
		full_xoff_th = req->num_active_tcs[port] *
			       BRB_MIN_BLOCKS_PER_TC;
		full_xon_th = full_xoff_th + min_pkt_size_blocks;
		pause_xoff_th = tc_headroom_blocks;
		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

		/* Init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);

		/* Init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);

		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* Clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}

			/* Init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);

			/* Init pause/full thresholds per physical TC - for
			 * loopback traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);

			/* Init pause/full thresholds per physical TC - for
			 * main traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}
/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
		vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
		vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);

	/* EDPM with GENEVE tunnel not supported in BB */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
		 ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u16 pf_id)
{
	/* disable gft search for PF */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
		 RAM_LINE_SIZE * pf_id, 0);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
		 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 rfs_cm_hdr_event_id;

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u16 pf_id,
		      bool tcp,
		      bool udp,
		      bool ipv4,
		      bool ipv6,
		      enum gft_profile_type profile_type)
{
	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
		 cam_line);
	cam_line = ecore_rd(p_hwfn, p_ptt,
			    PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ram_line_lo = 0;
	ram_line_hi = 0;

	/* Tunnel type */
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
	}

	ecore_wr(p_hwfn, p_ptt,
		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
		 ram_line_lo);
	ecore_wr(p_hwfn, p_ptt,
		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
		 REG_SIZE, ram_line_hi);

	/* Set default profile so that no filter match will happen */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		 PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		 PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);

	/* Enable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt, u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
			     msdm_vf_size_log);
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
			     msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}
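
/* Worked example (illustrative): in VF_ZONE_SIZE_MODE_DOUBLE the zone size
 * log grows by 1, so the default zone of
 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) bytes doubles and the offset mask
 * becomes (1 << (MSTORM_VF_ZONE_DEFAULT_SIZE_LOG + 1)) - 1.
 */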
/* Get mstorm statistics for offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
				       u16 stat_cnt_id,
				       u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
	    (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}
/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
					 u8 vf_id,
					 u8 vf_queue_id,
					 u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
	}

	return offset;
}
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
/* Calculate and return CDU validation byte per connection type / region /
 * cid.
 */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8] = {CID[31:20],CID[11:0]}
	 * [7:4]  = Region
	 * [3:0]  = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);
	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
			CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >>
			     CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
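
/* Worked example (illustrative, assuming the USE_CID/REGION/TYPE bits are
 * all set in the default config): for cid = 0x12345678, region = 3 and
 * conn_type = 2, the string-to-compress is (0x12345678 & 0xFFF00000) |
 * ((0x12345678 & 0xFFF) << 8) | (3 << 4) | 2 = 0x12367832, which is
 * byte-swapped and fed to the CRC8.
 */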
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
				       u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
				    u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}
/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3 - bits [31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5 - bits [15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1 - bits [15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
#define RSS_IND_TABLE_BASE_ADDR 4112
#define RSS_IND_TABLE_VPORT_SIZE 16
#define RSS_IND_TABLE_ENTRY_PER_LINE 8
/* Update RSS indirection table entry. */
void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u8 rss_id,
					  u8 ind_table_index,
					  u16 ind_table_value)
{
	u32 cnt, rss_addr;
	u32 *reg_val;
	u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
	u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];

	/* get entry address */
	rss_addr = RSS_IND_TABLE_BASE_ADDR +
		   RSS_IND_TABLE_VPORT_SIZE * rss_id +
		   ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;

	/* prepare update command */
	ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;

	for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
		if (cnt == ind_table_index) {
			rss_ind_entry[cnt] = ind_table_value;
			rss_ind_mask[cnt] = 0xFFFF;
		} else {
			rss_ind_entry[cnt] = 0;
			rss_ind_mask[cnt] = 0;
		}
	}

	/* Update entry in HW */
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);

	reg_val = (u32 *)rss_ind_mask;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);

	reg_val = (u32 *)rss_ind_entry;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
}
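
/* Worked example (illustrative): for rss_id = 2 and ind_table_index = 10,
 * the entry lives at rss_addr = 4112 + 16 * 2 + 10 / 8 = 4145 and occupies
 * slot 10 % 8 = 2 within that line, so only rss_ind_mask[2] is set to
 * 0xFFFF and all other entries in the 128-bit line are left unchanged.
 */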