/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
enum CmInterfaceEnum {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};
/* general constants */
#define QM_PQ_MEM_4KB(pq_size) \
	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
	(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
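/* Example (illustrative only, assuming QM_PQ_ELEMENT_SIZE is 4 bytes): a PQ
 * sized for 1000 elements needs QM_PQ_MEM_4KB(1000) =
 * DIV_ROUND_UP(1001 * 4, 0x1000) = 1 4KB page, while QM_PQ_SIZE_256B(1000) =
 * DIV_ROUND_UP(1000, 0x100) - 1 = 3, i.e. the programmed value is the size
 * in 256B units minus one.
 */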
#define QM_INVALID_PQ_ID		0xffff
/* feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF		4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND		62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0
#define QM_WFQ_VP_PQ_PF_SHIFT		5
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL		43750000
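/* Example (illustrative): QM_WFQ_INC_VAL(100) = 100 * 0x9000 = 3686400.
 * Since QM_WFQ_MAX_INC_VAL is 43750000, the largest weight that passes the
 * validation done below is 43750000 / 0x9000 = 1186.
 */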
/* RL constants */
#define QM_RL_UPPER_BOUND		62500000
#define QM_RL_PERIOD			5
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL		43750000
/* RL increment value - the factor of 1.01 was added after seeing only
 * 99% factor reached in a 25Gbps port with DPDK RFC 2544 test.
 * In this scenario the PF RL was reducing the line rate to 99% although
 * the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is
 * unknown at this point.
 */
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
				       QM_RL_PERIOD * 101) / (8 * 100)), 1)
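/* Example (illustrative, taking the caller's rate units as-is): for
 * rate = 25000, QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / 800 = 15781.
 * A zero rate falls back to 1000000, giving (1000000 * 5 * 101) / 800 =
 * 631250, and the OSAL_MAX_T keeps the increment at least 1.
 */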
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
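/* Example (illustrative): for the PBF_CMDQ_PURE_LB_LINES value of 150
 * defined above, QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) |
 * QM_LINE_CRD_REG_SIGN_BIT, i.e. a line credit of 292 with the register's
 * sign bit set.
 */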
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS	38	/* 256B blocks in 9700B packet */
/* headroom per-port */
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10
#define BTB_PURE_LB_RATIO	7	/* factored (hence really 0.7) */
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
	((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port)	(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
	 LB_VOQ(port))
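/* Example (illustrative): with max_phys_tcs_per_port = 4, physical TC 2 on
 * port 1 maps to VOQ 1 * 4 + 2 = 6, while the pure LB TC of the same port
 * maps to VOQ MAX_PHYS_VOQS + 1, past all physical VOQs.
 */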
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
				   bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;
	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;
			/* find #lines to divide between active physical TCs */
			phys_lines =
			    port_params[port_id].num_pbf_cmd_lines -
			    PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}
			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1) {
					voq = PHYS_VOQ(port_id, tc,
						       max_phys_tcs_per_port);
					ecore_cmdq_lines_voq_rt_init(p_hwfn,
							voq, phys_lines_per_tc);
				}
			}
			/* init registers for pure LB TC */
			ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						     PBF_CMDQ_PURE_LB_LINES);
		}
	}
}
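/* Worked example (illustrative numbers): a port reporting
 * num_pbf_cmd_lines = 3342 with two active physical TCs first reserves
 * PBF_CMDQ_PURE_LB_LINES (150) for the LB VOQ, then splits the remaining
 * 3192 lines evenly, giving each TC 1596 lines and a line credit of
 * QM_VOQ_LINE_CRD(1596) = (1596 - 4) * 2 = 3184 (plus the sign bit).
 */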
/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			/* subtract headroom blocks */
			usable_blocks =
			    port_params[port_id].num_btb_blocks -
			    BTB_HEADROOM_BLOCKS;
			/* find blocks per physical TC. use factor to avoid
			 * floating arithmetic
			 */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			pure_lb_blocks =
			    (usable_blocks * BTB_PURE_LB_FACTOR) /
			    (num_tcs_in_port *
			     BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
			pure_lb_blocks =
			    OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
			phys_blocks =
			    (usable_blocks - pure_lb_blocks) /
			    num_tcs_in_port;
			/* init physical TCs */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1) {
					voq = PHYS_VOQ(port_id, tc,
						       max_phys_tcs_per_port);
					STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET(voq),
					     phys_blocks);
				}
			}
			/* init pure LB TC */
			STORE_RT_REG(p_hwfn,
				     PBF_BTB_GUARANTEED_RT_OFFSET(
					LB_VOQ(port_id)), pure_lb_blocks);
		}
	}
}
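/* Worked example (illustrative numbers, following the fixed-point code
 * above as written): num_btb_blocks = 1000 and 4 active TCs give
 * usable_blocks = 1000 - 38 = 962, pure_lb_blocks = (962 * 10) /
 * (4 * 10 + 7) = 204, then OSAL_MAX_T(38, 204 / 10) = 38, and
 * phys_blocks = (962 - 38) / 4 = 231 guaranteed blocks per physical TC.
 */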
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id, u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_first_pf,
				    u32 num_pf_cids, u32 num_vf_cids,
				    u16 start_pq, u16 num_pf_pqs,
				    u16 num_vf_pqs, u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	u16 i, pq_id, pq_group;
	u16 num_pqs = num_pf_pqs + num_vf_pqs;
	u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)pf_id);
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));
	/* go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		struct qm_rf_pq_map tx_pq_map;
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		bool is_vf_pq = (i >= num_pf_pqs);
		/* added to avoid compilation warning */
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		bool rl_valid = pq_params[i].rl_valid &&
		    pq_params[i].vport_id < max_qm_global_rls;
		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
		u16 first_tx_pq_id =
		    vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
								tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (pf_id << QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* check RL ID */
		if (pq_params[i].rl_valid && pq_params[i].vport_id >=
		    max_qm_global_rls)
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT ID for rate limiter config");
		/* fill PQ map entry */
		OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  rl_valid ? pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication to PQ
			 * VF mask
			 */
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
			    (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}
	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);
	}
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 port_id, u8 pf_id,
				       u32 num_pf_cids,
				       u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;
	/* a single other PQ group is used in each PF, where PQ group i is
	 * used in PF i
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)pf_id);
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id, u8 pf_id, u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u16 i;
	u32 inc_val;
	u32 crd_reg_offset =
	    (pf_id <
	     MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
	     QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
	return 0;
}
/* Prepare PF RL runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;
	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (vport_params[i].vport_wfq) {
			inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
			if (inc_val > QM_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn, true,
					  "Invalid VPORT WFQ weight config");
				return -1;
			}
			/* each VPORT can have several VPORT PQ IDs for
			 * different TCs
			 */
			for (tc = 0; tc < NUM_OF_TCS; tc++) {
				u16 vport_pq_id =
				    vport_params[i].first_tx_pq_id[tc];
				if (vport_pq_id != QM_INVALID_PQ_ID) {
					STORE_RT_REG(p_hwfn,
						     QM_REG_WFQVPCRD_RT_OFFSET +
						     vport_pq_id,
						     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
					STORE_RT_REG(p_hwfn,
						     QM_REG_WFQVPWEIGHT_RT_OFFSET
						     + vport_pq_id, inc_val);
				}
			}
		}
	}
	return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport, u8 num_vports,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration");
		return -1;
	}
	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}
	return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;
	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}
	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}
	return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u8 pf_id,
			 u32 num_pf_cids, u32 num_vf_cids,
			 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
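/* Worked example (illustrative numbers, assuming QM_PQ_ELEMENT_SIZE is
 * 4 bytes): num_pf_cids = 1000 and num_vf_cids = 500 each fit in one 4KB
 * page per PQ, and num_tids = 7000 gives QM_PQ_MEM_4KB(8000) = 8 pages, so
 * with num_pf_pqs = 16 and num_vf_pqs = 32 the PF needs
 * 16 * 1 + 32 * 1 + 4 * 8 = 80 pages.
 */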
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	/* init AFullOprtnstcCrdMask */
	u32 mask =
	    (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	    (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	    (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	    (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	    (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	    (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	    (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	    (QM_OPPOR_PQ_EMPTY_DEF <<
	     QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	/* enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);
	/* enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
	/* enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);
	/* enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
	/* init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	/* init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port,
			bool is_first_pf, u32 num_pf_cids, u32 num_vf_cids,
			u32 num_tids, u16 start_pq, u16 num_pf_pqs,
			u16 num_vf_pqs, u8 start_vport, u8 num_vports,
			u16 pf_wfq, u32 pf_rl,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u8 tc, i;
	u32 other_mem_size_4kb =
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
	/* map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
				   num_tids, 0);
#endif
	/* map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
				max_phys_tcs_per_port, is_first_pf, num_pf_cids,
				num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
				start_vport, other_mem_size_4kb, pq_params,
				vport_params);
	/* init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init
		    (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
		     num_pf_pqs + num_vf_pqs, pq_params) != 0)
			return -1;
	/* init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
		return -1;
	/* init VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
		return -1;
	/* init VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, vport_params) != 0)
		return -1;
	return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
	return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u8 tc;
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration");
		return -1;
	}
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}
	return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration");
		return -1;
	}
	inc_val = QM_RL_INC_VAL(vport_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
	return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
	/* go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}
	return true;
}
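/* Example (illustrative): stopping PQs 30..33 crosses a 32-bit mask
 * boundary, so two commands are sent: GROUP_ID 0 with mask bits 30-31 set
 * (written when pq_id % 32 == 31), then GROUP_ID 1 with mask bits 0-1 set
 * (written when the last PQ is reached).
 */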
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET	4
#define NIG_LB_ETS_CLIENT_OFFSET	1
#define NIG_ETS_MIN_WFQ_BYTES		1600
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
/* NIG: RL constants */
#define NIG_RL_BASE_TYPE		1	/* byte base type */
#define NIG_RL_PERIOD			1	/* in us */
#define NIG_RL_PERIOD_CLK_25M		(25 * NIG_RL_PERIOD)
#define NIG_RL_INC_VAL(rate)		(((rate) * NIG_RL_PERIOD) / 8)
#define NIG_RL_MAX_VAL(inc_val, mtu) \
	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS	4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	u8 tc_client_offset =
	    is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_weight_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	u32 tc_bound_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
		 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
		 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));
	/* write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 tc_weight_base_addr +
				 tc_weight_addr_diff * tc_client_offset,
				 byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 tc_bound_base_addr +
				 tc_bound_addr_diff * tc_client_offset,
				 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}
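/* Example (illustrative): with TC weights {1, 2} and use_wfq set on both,
 * min_weight = 1, so the TCs are programmed with byte weights 1600 and 3200
 * (NIG_ETS_MIN_WFQ_BYTES * weight / min_weight), each bounded by
 * NIG_ETS_UP_BOUND = 2 * max(byte_weight, mtu).
 */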
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u8 tc;
	u32 ctrl, inc_val, reg_offset;
	/* disable global MAC+LB RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	/* configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 <<
		    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}
	/* disable global LB-only RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	/* configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}
	/* per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* disable TC RL */
		ctrl =
		    NIG_RL_BASE_TYPE <<
		NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
		/* configure and enable TC RL */
		if (req->tc_rate[tc]) {
			/* configure */
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
				 reg_offset, NIG_RL_PERIOD_CLK_25M);
			inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
				 reg_offset, inc_val);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
				 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
			/* enable */
			ctrl |=
			    1 <<
			NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
				 ctrl);
		}
	}
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 pri, tc;
	u32 pri_tc_mask = 0;
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (req->pri[pri].valid) {
			pri_tc_mask |= (req->pri[pri].tc_id <<
					(pri * NIG_PRIORITY_MAP_TC_BITS));
			tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
		}
	}
	/* write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
	/* write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}
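/* Example (illustrative): mapping VLAN priority 3 to TC 2 contributes
 * 2 << (3 * 4) = 0x2000 to pri_tc_mask (4 bits per priority) and sets
 * bit 3 in tc_pri_mask[2] for the reverse TC -> priority mask.
 */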
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES		1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);
	/* write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
				 tc * tc_weight_addr_diff, byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
				 tc * tc_bound_addr_diff,
				 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB	4800
#define BRB_TOTAL_RAM_BLOCKS_K2	5632
#define BRB_BLOCK_SIZE		128	/* in bytes */
#define BRB_MIN_BLOCKS_PER_TC	9
#define BRB_HYST_BYTES		10240
#define BRB_HYST_BLOCKS		(BRB_HYST_BYTES / BRB_BLOCK_SIZE)
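/* Example (illustrative): BRB_HYST_BLOCKS resolves to 10240 / 128 = 80
 * blocks of hysteresis, and in the code below a port with 4 active TCs gets
 * a full-XOFF threshold of 4 * BRB_MIN_BLOCKS_PER_TC = 36 blocks.
 */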
/* temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u8 port, active_ports = 0;
	u32 active_port_blocks, reg_offset = 0;
	u32 tc_headroom_blocks =
	    (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
	u32 min_pkt_size_blocks =
	    (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
	u32 total_blocks =
	    ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
	    BRB_TOTAL_RAM_BLOCKS_BB;
	/* find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;
	active_port_blocks = (u32)(total_blocks / active_ports);
	for (port = 0; port < req->max_ports_per_engine; port++) {
		/* calculate per-port sizes */
		u32 tc_guaranteed_blocks =
		    (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
		u32 port_blocks =
		    req->num_active_tcs[port] ? active_port_blocks : 0;
		u32 port_guaranteed_blocks =
		    req->num_active_tcs[port] * tc_guaranteed_blocks;
		u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
		u32 full_xoff_th =
		    req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
		u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
		u32 pause_xoff_th = tc_headroom_blocks;
		u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
		u8 tc;
		/* init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);
		/* init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);
		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}
			/* init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);
			/* init pause/full thresholds per physical TC - for
			 * loopback traffic
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
			/* init pause/full thresholds per physical TC - for
			 * main traffic
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}
/* In MF, should be called once per engine to set the EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, u32 ethType)
{
	/* update PRS register */
	STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
	/* update NIG register */
	STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
	/* update PBF register */
	STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
/* In MF, should be called once per port to set the EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt, u32 ethType)
{
	/* update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_FIC_FORMAT	-188897008
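/* Example (illustrative): SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 1, true)
 * first clears bit 1 of reg_val and then sets it, so 0x5 becomes 0x7;
 * with enable = false, bit 1 is simply cleared, leaving 0x5 unchanged.
 */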
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			   PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
			   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
				   NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
				   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		   eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		   ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			   NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
			   eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			   NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
			   ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		   eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		   ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);
	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
		 ip_geneve_enable ? 1 : 0);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID		23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR	272
#define T_ETH_PACKET_MATCH_RFS_EVENTID		25
#define PARSER_ETH_CONN_CM_HDR			(0x0)
#define CAM_LINE_SIZE				sizeof(u32)
#define RAM_LINE_SIZE				sizeof(u64)
#define REG_SIZE				sizeof(u32)
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	/* set RFS event ID to be awakened in Tstorm by PRS */
	u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u16 pf_id, bool tcp, bool udp,
			       bool ipv4, bool ipv6)
{
	u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	union gft_cam_line_union camLine;
	struct gft_ram_line ramLine;
	u32 *ramLinePointer = (u32 *)&ramLine;
	u32 i;
	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true,
			  "set_rfs_mode_enable: must accept at "
			  "least one of - ipv4 or ipv6");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true,
			  "set_rfs_mode_enable: must accept at "
			  "least one of - udp or tcp");
	/* set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
	/* Configure Registers for RFS mode */
	/* enable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
	/* do not load context only cid in PRS on match */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
	camLine.cam_line_mapped.camline = 0;
	/* cam line is now valid!! */
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_VALID, 1);
	/* filters are per PF!! */
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
	if (!(tcp && udp)) {
		SET_FIELD(camLine.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
		if (tcp)
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}
	if (!(ipv4 && ipv6)) {
		SET_FIELD(camLine.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}
	/* write characteristics to cam */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
		 camLine.cam_line_mapped.camline);
	camLine.cam_line_mapped.camline =
	    ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
	/* write line to RAM - compare to filter 4 tuple */
	ramLine.low32bits = 0;
	ramLine.high32bits = 0;
	SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_DST_PORT, 1);
	/* each iteration write to reg */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
			 RAM_LINE_SIZE * pf_id +
			 i * REG_SIZE, *(ramLinePointer + i));
	/* set default profile so that no filter match will happen */
	ramLine.low32bits = 0xffff;
	ramLine.high32bits = 0xffff;
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
			 RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
			 i * REG_SIZE, *(ramLinePointer + i));
}
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt, u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;
	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;
	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
	if (runtime_init) {
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
			     msdm_vf_size_log);
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
			     msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}
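/* Example (illustrative): if MSTORM_VF_ZONE_DEFAULT_SIZE_LOG were 7,
 * VF_ZONE_SIZE_MODE_DOUBLE would program a shift of 8 and an offset mask of
 * (1 << 8) - 1 = 0xff, doubling each VF's Mstorm zone stride.
 */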
/* get Mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
				       u16 stat_cnt_id,
				       u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
	    (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    (stat_cnt_id - MAX_NUM_PFS);
	}
	return offset;
}
/* get Mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
					 u8 vf_id,
					 u8 vf_queue_id,
					 u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    vf_id;
	}
	return offset;
}