/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_init_fw_funcs.h"
/* @DPDK CmInterfaceEnum */
enum cm_interface_enum {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};
/* general constants */
#define QM_PQ_MEM_4KB(pq_size) \
(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
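/* Worked example (illustrative, not from the original sources; it assumes
 * QM_PQ_ELEMENT_SIZE is 4 bytes): a PQ sized for 2048 CIDs needs
 * QM_PQ_MEM_4KB(2048) = DIV_ROUND_UP((2048 + 1) * 4, 0x1000) = 3 pages of
 * 4KB, and its size field is QM_PQ_SIZE_256B(2048) =
 * DIV_ROUND_UP(2048, 0x100) - 1 = 7, i.e. the size in 256B units minus one.
 * A pq_size of 0 yields 0 from both macros.
 */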
#define QM_INVALID_PQ_ID 0xffff
/* feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND 62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL 43750000
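/* Worked example (illustrative): QM_WFQ_INC_VAL(100) = 100 * 0x9000 =
 * 3686400 credits added per WFQ period. The largest weight accepted by the
 * validity checks below is QM_WFQ_MAX_INC_VAL / 0x9000 = 1186.
 */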
/* RL constants */
#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5 /* in us */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
#define QM_RL_INC_VAL(rate) \
OSAL_MAX_T(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD * 1.01) / 8), 1)
#define QM_RL_MAX_INC_VAL 43750000
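/* Worked example (illustrative, assuming the rate argument is in Mb/s, so
 * 1 Mb/s equals 1 bit/us): QM_RL_INC_VAL(1000) for a 1 Gb/s limit is
 * max((1000 * 5 * 1.01) / 8, 1) = 631 bytes of credit per 5 us RL period;
 * the 1.01 factor adds a 1% margin, and a rate of 0 falls back to 1000000.
 */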
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
#define EAGLE_WORKAROUND_TC 7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8 /* eagle workaround CmdQ */
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
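/* Worked example (illustrative): for PBF_CMDQ_PURE_LB_LINES = 150 above,
 * QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT = 292
 * line credits; the sign bit is ORed in because the HW credit registers
 * hold signed values.
 */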
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS /* headroom per-port */
#define BTB_EAGLE_WORKAROUND_BLOCKS 4 /* eagle workaround blocks */
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 0x2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
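/* Expansion example (illustrative): QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
 * GROUP_ID, 3) expands to SET_FIELD(cmd_arr[QM_STOP_CMD_GROUP_ID_OFFSET],
 * QM_STOP_CMD_GROUP_ID, 3), which with the constants above places the
 * value 3 into bits 16..19 of cmd_arr[1].
 */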
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
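/* Worked example (illustrative; LB_TC and MAX_PHYS_VOQS come from the
 * shared HSI headers): with max_phys_tcs_per_port = 4, traffic on port 1 /
 * TC 2 maps to PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = VOQ 6, while pure LB
 * traffic on the same port maps to LB_VOQ(1) = MAX_PHYS_VOQS + 1.
 */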
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for the
 * specified VOQ
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;
	/* in A0, limit the queue so that at most 511 commands of the
	 * minimum size of 4 (FCoE minimum size) fit in it
	 */
	bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
	if (is_bb_a0)
		cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;
	bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;
			phys_lines =
			    port_params[port_id].num_pbf_cmd_lines -
			    PBF_CMDQ_PURE_LB_LINES;
			if (eagle_workaround)
				phys_lines -= PBF_CMDQ_EAGLE_WORKAROUND_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}
			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1) {
					voq = PHYS_VOQ(port_id, tc,
						       max_phys_tcs_per_port);
					ecore_cmdq_lines_voq_rt_init(p_hwfn,
							voq, phys_lines_per_tc);
				}
			}
			/* init registers for pure LB TC */
			ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						     PBF_CMDQ_PURE_LB_LINES);
			/* init registers for eagle workaround */
			if (eagle_workaround) {
				voq =
				    PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
					     max_phys_tcs_per_port);
				ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
					      PBF_CMDQ_EAGLE_WORKAROUND_LINES);
			}
		}
	}
}
/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *     B - BTB blocks for this port
 *     C - Number of physical TCs for this port
 * 2. Calculation:
 *     a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *        headroom.
 *     b. B = B - 38 (remainder after global headroom allocation).
 *     c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *     d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *     e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
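/* Worked example for the scheme above (illustrative numbers only):
 * B = 1000 blocks, C = 4 TCs, no eagle workaround:
 *     a. 38 headroom blocks      b. B = 1000 - 38 = 962
 *     c. pure LB = MAX(38, 962 / 4.7) = 204
 *     d. B = 962 - 204 = 758     e. each TC gets 758 / 4 = 189 blocks.
 * BTB_PURE_LB_FACTOR and BTB_PURE_LB_RATIO encode the 0.7 denominator in
 * integer arithmetic.
 */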
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			/* subtract headroom blocks */
			usable_blocks =
			    port_params[port_id].num_btb_blocks -
			    BTB_HEADROOM_BLOCKS;
			if (eagle_workaround)
				usable_blocks -= BTB_EAGLE_WORKAROUND_BLOCKS;
			/* find blocks per physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			pure_lb_blocks =
			    (usable_blocks * BTB_PURE_LB_FACTOR) /
			    (num_tcs_in_port *
			     BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
			pure_lb_blocks =
			    OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
			phys_blocks =
			    (usable_blocks - pure_lb_blocks) / num_tcs_in_port;
			/* init physical TCs */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1) {
					voq = PHYS_VOQ(port_id, tc,
						       max_phys_tcs_per_port);
					STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET(voq),
					     phys_blocks);
				}
			}
			/* init pure LB TC */
			STORE_RT_REG(p_hwfn,
				     PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ
								  (port_id)),
				     pure_lb_blocks);
			/* init eagle workaround */
			if (eagle_workaround) {
				voq =
				    PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
					     max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					PBF_BTB_GUARANTEED_RT_OFFSET(voq),
					BTB_EAGLE_WORKAROUND_BLOCKS);
			}
		}
	}
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id, u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_first_pf,
				    u32 num_pf_cids, u32 num_vf_cids,
				    u16 start_pq, u16 num_pf_pqs,
				    u16 num_vf_pqs, u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	u16 i, pq_id, pq_group;
	u16 num_pqs = num_pf_pqs + num_vf_pqs;
	u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)pf_id);
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));
	/* go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		struct qm_rf_pq_map tx_pq_map;
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		bool is_vf_pq = (i >= num_pf_pqs);
		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
		u16 first_tx_pq_id =
		    vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
								tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
						QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  is_vf_pq ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  is_vf_pq ? pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
			    (1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}
	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			if (is_bb_a0) {
				u32 curr_mask =
				    is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
						       QM_REG_MAXPQSIZETXSEL_0 +
							       i * 4);
				STORE_RT_REG(p_hwfn,
					     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
					     i, curr_mask | tx_pq_vf_mask[i]);
			} else {
				STORE_RT_REG(p_hwfn,
					     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
					     i, tx_pq_vf_mask[i]);
			}
		}
	}
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 port_id, u8 pf_id, u32 num_pf_cids,
				       u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)pf_id);
	/* set PQ size */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF. Return -1 on err */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id, u8 pf_id, u16 pf_wfq,
				u8 max_phys_tcs_per_port, u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val;
	u32 crd_reg_offset =
	    (pf_id <
	     MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
	     QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
	u16 i;
	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
	return 0;
}
/* Prepare PF RL runtime init values for the specified PF. Return -1 on err */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;
	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (vport_params[i].vport_wfq) {
			inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
			if (inc_val > QM_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn, true,
					  "Invalid VPORT WFQ weight config");
				return -1;
			}
			for (tc = 0; tc < NUM_OF_TCS; tc++) {
				u16 vport_pq_id =
				    vport_params[i].first_tx_pq_id[tc];
				if (vport_pq_id != QM_INVALID_PQ_ID) {
					STORE_RT_REG(p_hwfn,
						     QM_REG_WFQVPCRD_RT_OFFSET +
						     vport_pq_id,
						     QM_WFQ_CRD_REG_SIGN_BIT);
					STORE_RT_REG(p_hwfn,
						QM_REG_WFQVPWEIGHT_RT_OFFSET
						     + vport_pq_id, inc_val);
				}
			}
		}
	}
	return 0;
}
/* Prepare VPORT RL runtime init values for specified VPORT. Ret -1 on error. */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport, u8 num_vports,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}
	return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;
	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}
	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}
	return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u8 pf_id,
			 u32 num_pf_cids, u32 num_vf_cids,
			 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
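/* Sizing example (illustrative, assuming QM_PQ_ELEMENT_SIZE is 4 bytes):
 * num_pf_cids = 2048, num_vf_cids = 1024, num_tids = 512, num_pf_pqs = 16,
 * num_vf_pqs = 8 gives 16 * QM_PQ_MEM_4KB(2048) + 8 * QM_PQ_MEM_4KB(1024) +
 * 4 * QM_PQ_MEM_4KB(2560) = 16 * 3 + 8 * 2 + 4 * 3 = 76 pages of 4KB.
 */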
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en, bool pf_wfq_en,
			    bool vport_rl_en, bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	u32 mask;
	/* init AFullOprtnstcCrdMask */
	mask =
	    (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	    (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	    (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	    (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	    (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	    (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	    (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	    (QM_OPPOR_PQ_EMPTY_DEF <<
	     QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	/* enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);
	/* enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
	/* enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);
	/* enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
	/* init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	/* init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port,
			bool is_first_pf, u32 num_pf_cids, u32 num_vf_cids,
			u32 num_tids, u16 start_pq, u16 num_pf_pqs,
			u16 num_vf_pqs, u8 start_vport, u8 num_vports,
			u16 pf_wfq, u32 pf_rl,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u8 tc, i;
	u32 other_mem_size_4kb =
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
	/* map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
				   num_tids, 0);
#endif
	/* map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
				max_phys_tcs_per_port, is_first_pf, num_pf_cids,
				num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
				start_vport, other_mem_size_4kb, pq_params,
				vport_params);
	/* init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init
		    (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
		     num_pf_pqs + num_vf_pqs, pq_params) != 0)
			return -1;
	/* init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
		return -1;
	/* set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
		return -1;
	/* set VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, vport_params) != 0)
		return -1;
	return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
	return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u8 tc;
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration");
		return -1;
	}
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}
	return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
	return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
	/* go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}
	return true;
}
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
(2 * ((weight) > (mtu) ? (weight) : (mtu)))
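/* Worked example (illustrative): with TC weights 1 and 2 (min_weight = 1),
 * the byte-mode WFQ weights programmed below become 1600 and 3200 bytes,
 * and each TC's upper bound is NIG_ETS_UP_BOUND(weight, mtu) =
 * 2 * max(weight, mtu), e.g. 2 * max(3200, 1500) = 6400 for a 1500B MTU.
 */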
/* NIG: RL constants */
#define NIG_RL_BASE_TYPE 1 /* byte base type */
#define NIG_RL_PERIOD 1 /* in us */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
#define NIG_RL_MAX_VAL(inc_val, mtu) \
(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
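/* Worked example (illustrative, assuming rates in Mb/s): a 1 Gb/s limit
 * gives NIG_RL_INC_VAL(1000) = (1000 * 1) / 8 = 125 bytes of credit per
 * 1 us period, and with a 1500B MTU the ceiling is
 * NIG_RL_MAX_VAL(125, 1500) = 2 * max(125, 1500) = 3000 bytes.
 */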
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	u8 tc_client_offset =
	    is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_weight_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	u32 tc_bound_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
		 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
		 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));
	/* write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 tc_weight_base_addr +
				 tc_weight_addr_diff * tc_client_offset,
				 byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 tc_bound_base_addr +
				 tc_bound_addr_diff * tc_client_offset,
				 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u8 tc;
	u32 ctrl, inc_val, reg_offset;
	/* disable global MAC+LB RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	/* configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 <<
		  NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}
	/* disable global LB-only RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	/* configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}
	/* per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* disable TC RL */
		ctrl =
		    NIG_RL_BASE_TYPE <<
		NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
		/* configure and enable TC RL */
		if (req->tc_rate[tc]) {
			/* configure */
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
				 reg_offset, NIG_RL_PERIOD_CLK_25M);
			inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
				 reg_offset, inc_val);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
				 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
			/* enable */
			ctrl |=
			    1 <<
			NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
				 ctrl);
		}
	}
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 pri, tc;
	u32 pri_tc_mask = 0;
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (req->pri[pri].valid) {
			u8 tc_id = req->pri[pri].tc_id;
			pri_tc_mask |=
			    (tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
			tc_pri_mask[tc_id] |= (1 << pri);
		}
	}
	/* write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
	/* write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
(2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);
	/* write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
				 tc * tc_weight_addr_diff, byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
				 tc * tc_bound_addr_diff,
				 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128 /* in bytes */
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
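/* Worked example (illustrative): BRB_HYST_BLOCKS resolves to 10240 / 128 =
 * 80 blocks, and a requested 10000B per-TC headroom maps to
 * DIV_ROUND_UP(10000, 128) = 79 blocks in the function below. On a BB
 * device with two active ports, each port gets 4800 / 2 = 2400 RAM blocks.
 */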
/*
 * temporary big RAM allocation - should be updated
 */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u8 port, active_ports = 0;
	u32 active_port_blocks, reg_offset = 0;
	u32 tc_headroom_blocks =
	    (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
	u32 min_pkt_size_blocks =
	    (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
	u32 total_blocks =
	    ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
	    BRB_TOTAL_RAM_BLOCKS_BB;
	/* find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;
	active_port_blocks = (u32)(total_blocks / active_ports);
	for (port = 0; port < req->max_ports_per_engine; port++) {
		/* calculate per-port sizes */
		u8 tc;
		u32 tc_guaranteed_blocks =
		    (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
		u32 port_blocks =
		    req->num_active_tcs[port] ? active_port_blocks : 0;
		u32 port_guaranteed_blocks =
		    req->num_active_tcs[port] * tc_guaranteed_blocks;
		u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
		u32 full_xoff_th =
		    req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
		u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
		u32 pause_xoff_th = tc_headroom_blocks;
		u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
		/* init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);
		/* init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);
		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}
			/* init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);
			/* init pause/full thresholds per physical TC - for
			 * loopback traffic
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
			/* init pause/full thresholds per physical TC - for
			 * main traffic
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}
/* In MF, should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, u32 eth_type)
{
	/* update PRS register */
	STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
	/* update NIG register */
	STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
	/* update PBF register */
	STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
}

/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt, u32 eth_type)
{
	/* update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, eth_type);
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
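/* Expansion example (illustrative): with var = 0x7, offset = 1 and
 * enable = 0, the macro clears bit 1 and leaves var = 0x5; with enable = 1
 * the bit is set regardless of its previous value.
 */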
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
			vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
				   NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
				   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* geneve tunnel not supported in BB_A0 */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev))
		return;
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;
	/* geneve tunnel not supported in BB_A0 */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev))
		return;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);
	/* comp ver */
	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
		 ip_geneve_enable ? 1 : 0);
}