/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_tools.h"
#include "ecore_init_fw_funcs.h"

/* @DPDK CmInterfaceEnum */
enum cm_interface_enum {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};
/* general constants */
#define QM_PQ_MEM_4KB(pq_size) \
(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
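/* Illustrative arithmetic (added comment, not in the original file):
 * assuming QM_PQ_ELEMENT_SIZE is 4 bytes, QM_PQ_MEM_4KB(1000) =
 * DIV_ROUND_UP((1000 + 1) * 4, 0x1000) = 1 page of 4KB, and
 * QM_PQ_SIZE_256B(1000) = DIV_ROUND_UP(1000, 0x100) - 1 = 3, i.e. the PQ
 * size is encoded in 256B units minus one. A pq_size of 0 yields 0 in
 * both macros, so unused queues consume no memory.
 */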
#define QM_INVALID_PQ_ID		0xffff
/* feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF		4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND		62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0
#define QM_WFQ_VP_PQ_PF_SHIFT		5
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL		43750000
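/* Illustrative arithmetic (added comment): a WFQ weight of 1 programs an
 * increment of 0x9000 (36864), so the largest weight that still satisfies
 * QM_WFQ_MAX_INC_VAL is 43750000 / 36864 ~= 1186; QM_WFQ_INC_VAL(0) == 0
 * and is rejected by the validation code below.
 */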
/* RL constants */
#define QM_RL_UPPER_BOUND		62500000
#define QM_RL_PERIOD			5 /* in us */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
#define QM_RL_INC_VAL(rate) \
OSAL_MAX_T(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD * 1.01) / 8), 1)
#define QM_RL_MAX_INC_VAL		43750000
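/* Illustrative arithmetic (added comment), assuming the rate argument is
 * given in Mb/s: QM_RL_INC_VAL(10000) = max(10000 * 5 * 1.01 / 8, 1) =
 * 6312 bytes of credit per 5 us period for a 10G rate, including the 1%
 * margin; a rate of 0 falls back to 1000000 (effectively uncapped).
 */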
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1
#define EAGLE_WORKAROUND_TC		7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_EAGLE_WORKAROUND_LINES	8 /* eagle workaround CmdQ */
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
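/* Illustrative arithmetic (added comment): QM_VOQ_LINE_CRD(150) programs
 * (150 - 4) * 2 = 292, OR-ed with QM_LINE_CRD_REG_SIGN_BIT, for the 150
 * pure-LB command queue lines defined above.
 */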
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS		38 /* 256B blocks in 9700B packet */
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS /* headroom per-port */
#define BTB_EAGLE_WORKAROUND_BLOCKS	4 /* eagle workaround blocks */
#define BTB_PURE_LB_FACTOR		10
#define BTB_PURE_LB_RATIO		7 /* factored (hence really 0.7) */
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
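/* Token-pasting example (added comment): for instance,
 * QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, pq_mask) expands to
 * SET_FIELD(cmd_arr[QM_STOP_CMD_PAUSE_MASK_OFFSET],
 *	     QM_STOP_CMD_PAUSE_MASK, pq_mask),
 * i.e. the _OFFSET constant selects the command dword and the
 * _SHIFT/_MASK pair places the value within it.
 */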
/* VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
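/* Illustrative mapping (added comment, assuming LB_TC is above the
 * physical TC range): with max_phys_tcs_per_port == 4,
 * VOQ(1, 2, 4) = PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = 6, while the pure LB TC
 * of port 1 maps to LB_VOQ(1) = MAX_PHYS_VOQS + 1, so loopback traffic
 * gets its own VOQ after all the physical ones.
 */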
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;
	/* on BB A0 the PBF command queue is limited to 1022 lines */
	bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
	if (is_bb_a0)
		cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id;
	bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;
			phys_lines =
			    port_params[port_id].num_pbf_cmd_lines -
			    PBF_CMDQ_PURE_LB_LINES;
			if (eagle_workaround)
				phys_lines -= PBF_CMDQ_EAGLE_WORKAROUND_LINES;
			/* find #lines per active physical TC */
			phys_lines_per_tc =
			    phys_lines /
			    port_params[port_id].num_active_phys_tcs;
			/* init registers per active TC */
			for (tc = 0;
			     tc < port_params[port_id].num_active_phys_tcs;
			     tc++) {
				voq =
				    PHYS_VOQ(port_id, tc,
					     max_phys_tcs_per_port);
				ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
							     phys_lines_per_tc);
			}
			/* init registers for pure LB TC */
			ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						     PBF_CMDQ_PURE_LB_LINES);
			/* init registers for eagle workaround */
			if (eagle_workaround) {
				voq =
				    PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
					     max_phys_tcs_per_port);
				ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
					PBF_CMDQ_EAGLE_WORKAROUND_LINES);
			}
		}
	}
}

/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
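/* Worked example (added comment, illustrative numbers only), tracing the
 * integer arithmetic of ecore_btb_blocks_rt_init() below: for a port with
 * 3038 BTB blocks and C = 4 active physical TCs, the 38 headroom blocks
 * leave 3000 usable; then (3000 * 10) / (4 * 10 + 7) = 638 and
 * max(38, 638 / 10) = 63 blocks go to the pure LB VOQ, leaving
 * (3000 - 63) / 4 = 734 guaranteed blocks per physical TC.
 */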
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id;
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			/* subtract headroom blocks */
			usable_blocks =
			    port_params[port_id].num_btb_blocks -
			    BTB_HEADROOM_BLOCKS;
			if (eagle_workaround)
				usable_blocks -= BTB_EAGLE_WORKAROUND_BLOCKS;
			/* find blocks for the pure LB VOQ (fixed-point, x10) */
			pure_lb_blocks =
			    (usable_blocks * BTB_PURE_LB_FACTOR) /
			    (port_params[port_id].num_active_phys_tcs *
			     BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
			pure_lb_blocks =
			    OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
			/* divide the remainder between the physical TCs */
			phys_blocks =
			    (usable_blocks - pure_lb_blocks) /
			    port_params[port_id].num_active_phys_tcs;
			/* init physical TCs */
			for (tc = 0;
			     tc < port_params[port_id].num_active_phys_tcs;
			     tc++) {
				voq =
				    PHYS_VOQ(port_id, tc,
					     max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET(voq),
					     phys_blocks);
			}
			/* init pure LB TC */
			STORE_RT_REG(p_hwfn,
				     PBF_BTB_GUARANTEED_RT_OFFSET(
					LB_VOQ(port_id)),
				     pure_lb_blocks);
			/* init eagle workaround */
			if (eagle_workaround) {
				voq =
				    PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
					     max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET(voq),
					     BTB_EAGLE_WORKAROUND_BLOCKS);
			}
		}
	}
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_first_pf,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	u16 i, pq_id, pq_group;
	u16 num_pqs = num_pf_pqs + num_vf_pqs;
	u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)pf_id);
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));
	/* go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		struct qm_rf_pq_map tx_pq_map;
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		bool is_vf_pq = (i >= num_pf_pqs);
		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
		u16 first_tx_pq_id =
		    vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
								tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
						QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  is_vf_pq ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  is_vf_pq ? pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* if PQ is associated with a VF, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
			    (1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}
	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			if (is_bb_a0) {
				u32 curr_mask =
				    is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
						       QM_REG_MAXPQSIZETXSEL_0
							       + i * 4);
				STORE_RT_REG(p_hwfn,
					     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
					     i, curr_mask | tx_pq_vf_mask[i]);
			} else {
				STORE_RT_REG(p_hwfn,
					     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
					     i, tx_pq_vf_mask[i]);
			}
		}
	}
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 port_id,
				       u8 pf_id,
				       u32 num_pf_cids,
				       u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;
	/* a single other PQ group is used in each PF, where PQ group i is used
	 * in PF i
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)pf_id);
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u16 i;
	u32 inc_val;
	u32 crd_reg_offset =
	    (pf_id <
	     MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
	     QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 tc, i;
	u32 inc_val;
	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (vport_params[i].vport_wfq) {
			inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
			if (inc_val > QM_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn, true,
					  "Invalid VPORT WFQ weight config");
				return -1;
			}
			for (tc = 0; tc < NUM_OF_TCS; tc++) {
				u16 vport_pq_id =
				    vport_params[i].first_tx_pq_id[tc];
				if (vport_pq_id != QM_INVALID_PQ_ID) {
					STORE_RT_REG(p_hwfn,
						     QM_REG_WFQVPCRD_RT_OFFSET +
						     vport_pq_id,
						     QM_WFQ_CRD_REG_SIGN_BIT);
					STORE_RT_REG(p_hwfn,
						QM_REG_WFQVPWEIGHT_RT_OFFSET
						     + vport_pq_id, inc_val);
				}
			}
		}
	}
	return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}
	return 0;
}

/* Poll until the QM SDM command-ready signal is set.
 * Return false on timeout.
 */
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;
	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}
	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}
	return true;
}

static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u8 pf_id,
			 u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	u8 port_id;
	/* init AFullOprtnstcCrdMask */
	u32 mask =
	    (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	    (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	    (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	    (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	    (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	    (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	    (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	    (QM_OPPOR_PQ_EMPTY_DEF <<
	     QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	/* check eagle workaround */
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active &&
		    port_params[port_id].num_active_phys_tcs >
		    EAGLE_WORKAROUND_TC &&
		    ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't config 8 TCs with Eagle"
				  " eng1 workaround");
			return -1;
		}
	}
	/* enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);
	/* enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
	/* enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);
	/* enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
	/* init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	/* init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	return 0;
}

int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			bool is_first_pf,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u8 tc, i;
	u32 other_mem_size_4kb =
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
	/* map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
				   num_tids, 0);
#endif
	/* map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
				max_phys_tcs_per_port, is_first_pf, num_pf_cids,
				num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
				start_vport, other_mem_size_4kb, pq_params,
				vport_params);
	/* init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init
		    (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
		     num_pf_pqs + num_vf_pqs, pq_params) != 0)
			return -1;
	/* init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
		return -1;
	/* init VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
		return -1;
	/* init VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, vport_params) != 0)
		return -1;
	return 0;
}

int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}

int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
	return 0;
}

int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u8 tc;
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration");
		return -1;
	}
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}
	return 0;
}

int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
	return 0;
}

bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
	/* go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}
	return true;
}

/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET	4
#define NIG_LB_ETS_CLIENT_OFFSET	1
#define NIG_ETS_MIN_WFQ_BYTES		1600
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
(2 * ((weight) > (mtu) ? (weight) : (mtu)))
/* NIG: RL constants */
#define NIG_RL_BASE_TYPE		1 /* byte base type */
#define NIG_RL_PERIOD			1 /* in us */
#define NIG_RL_PERIOD_CLK_25M		(25 * NIG_RL_PERIOD)
#define NIG_RL_INC_VAL(rate)		(((rate) * NIG_RL_PERIOD) / 8)
#define NIG_RL_MAX_VAL(inc_val, mtu) \
(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
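/* Illustrative arithmetic (added comment), assuming the rate argument is
 * given in Mb/s: with the 1 us period, NIG_RL_INC_VAL(25000) =
 * 25000 / 8 = 3125 bytes of credit per period for a 25G rate, and
 * NIG_RL_MAX_VAL bounds the bucket at twice the larger of the increment
 * and the MTU.
 */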
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS	4
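/* Illustrative packing (added comment): with 4 bits per TC, all eight
 * VLAN priorities fit into one 32-bit register; e.g. priority 5 mapped to
 * TC 3 contributes (3 << (5 * 4)) = 0x00300000 to the PKT_PRIORITY_TO_TC
 * value built in ecore_init_nig_pri_tc_map() below.
 */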
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	u8 tc_client_offset =
	    is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_weight_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	u32 tc_bound_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
		 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
		 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));
	/* write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 tc_weight_base_addr +
				 tc_weight_addr_diff * tc_client_offset,
				 byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 tc_bound_base_addr +
				 tc_bound_addr_diff * tc_client_offset,
				 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}

void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u8 tc;
	u32 ctrl, inc_val, reg_offset;
	/* disable global MAC+LB RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	/* configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 <<
		    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}
	/* disable global LB-only RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	/* configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}
	/* per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* disable TC RL */
		ctrl =
		    NIG_RL_BASE_TYPE <<
		NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
		/* configure and enable TC RL */
		if (req->tc_rate[tc]) {
			/* configure */
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
				 reg_offset, NIG_RL_PERIOD_CLK_25M);
			inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
				 reg_offset, inc_val);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
				 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
			/* enable */
			ctrl |=
			    1 <<
			NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
				 ctrl);
		}
	}
}

void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 pri, tc;
	u32 pri_tc_mask = 0;
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (req->pri[pri].valid) {
			pri_tc_mask |= ((u32)req->pri[pri].tc_id <<
					(pri * NIG_PRIORITY_MAP_TC_BITS));
			tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
		}
	}
	/* write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
	/* write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}

/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES		1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
(2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);
	/* write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
				 tc * tc_weight_addr_diff, byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
				 tc * tc_bound_addr_diff,
				 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}

/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB		4800
#define BRB_TOTAL_RAM_BLOCKS_K2		5632
#define BRB_BLOCK_SIZE			128 /* in bytes */
#define BRB_MIN_BLOCKS_PER_TC		9
#define BRB_HYST_BYTES			10240
#define BRB_HYST_BLOCKS			(BRB_HYST_BYTES / BRB_BLOCK_SIZE)
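/* Illustrative arithmetic (added comment): with 128-byte blocks the
 * 10240-byte hysteresis equals 10240 / 128 = 80 blocks, and a K2 device
 * has 5632 * 128 bytes = 704KB of BRB RAM to divide between the active
 * ports.
 */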
/* temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u8 port, active_ports = 0;
	u32 active_port_blocks, reg_offset = 0;
	u32 tc_headroom_blocks =
	    (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
	u32 min_pkt_size_blocks =
	    (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
	u32 total_blocks =
	    ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
	    BRB_TOTAL_RAM_BLOCKS_BB;
	/* find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;
	active_port_blocks = (u32)(total_blocks / active_ports);
	for (port = 0; port < req->max_ports_per_engine; port++) {
		/* calculate per-port sizes */
		u8 tc;
		u32 tc_guaranteed_blocks =
		    (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
		u32 port_blocks =
		    req->num_active_tcs[port] ? active_port_blocks : 0;
		u32 port_guaranteed_blocks =
		    req->num_active_tcs[port] * tc_guaranteed_blocks;
		u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
		u32 full_xoff_th =
		    req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
		u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
		u32 pause_xoff_th = tc_headroom_blocks;
		u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
		/* init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);
		/* init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);
		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}
			/* init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);
			/* init pause/full thresholds - loopback traffic */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
			/* init pause/full thresholds - main traffic */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}

/* In MF, should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, u32 eth_type)
{
	/* update PRS register */
	STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
	/* update NIG register */
	STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
	/* update PBF register */
	STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
}

/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt, u32 eth_type)
{
	/* update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, eth_type);
}

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_FIC_FORMAT	-188897008
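/* Illustrative expansion (added comment): for instance,
 * SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 3, true) first clears bit 3 of
 * reg_val and then sets it again, while enable == false leaves the bit
 * cleared; the read-modify-write callers below preserve the remaining
 * tunnel-type bits this way.
 */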
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
			vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
				   NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
				   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}

void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}

void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* geneve tunnel not supported in BB_A0 */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev))
		return;
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;
	/* geneve tunnel not supported in BB_A0 */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev))
		return;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);
	/* comp ver */
	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
		 ip_geneve_enable ? 1 : 0);
}