1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include "bcm_osal.h"
8 #include "ecore_hw.h"
9 #include "ecore_init_ops.h"
10 #include "reg_addr.h"
11 #include "ecore_rt_defs.h"
12 #include "ecore_hsi_common.h"
13 #include "ecore_hsi_init_func.h"
14 #include "ecore_hsi_eth.h"
15 #include "ecore_hsi_init_tool.h"
16 #include "ecore_iro.h"
17 #include "ecore_init_fw_funcs.h"
18
19 #define CDU_VALIDATION_DEFAULT_CFG 61
20
21 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
22         { 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */
23         { 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */
24         { 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */
25 };
26 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
27         { 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
28 };
29
30 /* General constants */
31 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
32                                 QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
33 #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
34                                   0)
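/* Illustrative example (assumes QM_PQ_ELEMENT_SIZE is 4 bytes, its usual value
 * in this driver family; it is not defined in this file): a PQ sized for 1000
 * CIDs occupies DIV_ROUND_UP((1000 + 1) * 4, 0x1000) = 1 page of 4KB, and its
 * size register value is DIV_ROUND_UP(1000, 0x100) - 1 = 3, i.e. the size in
 * 256B units minus one.
 */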
35 #define QM_INVALID_PQ_ID                0xffff
36
37 /* Feature enable */
38 #define QM_BYPASS_EN                    1
39 #define QM_BYTE_CRD_EN                  1
40
41 /* Other PQ constants */
42 #define QM_OTHER_PQS_PER_PF             4
43
44 /* VOQ constants */
45 #define QM_E5_NUM_EXT_VOQ               (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
46
47 /* WFQ constants: */
48
49 /* Upper bound: 10 * the burst size of 1 ms at 50 Gbps (62.5 MB) */
50 #define QM_WFQ_UPPER_BOUND              62500000
51
52 /* Bit offset of the VOQ field in the WFQ VP PQ map */
53 #define QM_WFQ_VP_PQ_VOQ_SHIFT          0
54
55 /* Bit offset of the PF field in the WFQ VP PQ map */
56 #define QM_WFQ_VP_PQ_PF_E4_SHIFT        5
57 #define QM_WFQ_VP_PQ_PF_E5_SHIFT        6
58
59 /* 0x9000 = 4*9*1024 */
60 #define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
61
62 /* Max WFQ increment value is 0.7 * upper bound */
63 #define QM_WFQ_MAX_INC_VAL              ((QM_WFQ_UPPER_BOUND * 7) / 10)
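/* Illustrative example: a WFQ weight of 1 programs an increment value of
 * 0x9000 = 36864, so the largest weight that still satisfies
 * QM_WFQ_MAX_INC_VAL is roughly 43750000 / 36864 ~= 1186.
 */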
64
65 /* Number of VOQs in E5 QmWfqCrd register */
66 #define QM_WFQ_CRD_E5_NUM_VOQS          16
67
68 /* RL constants: */
69
70 /* Period in us */
71 #define QM_RL_PERIOD                    5
72
73 /* Period in 25MHz cycles */
74 #define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
75
76 /* RL increment value - the rate is specified in Mbps. The factor of 1.01 was
77  * added after observing that only 99% of the configured rate was reached on a
78  * 25Gbps port in a DPDK RFC 2544 test. In that scenario the PF RL reduced the
79  * line rate to 99% even though the credit increment value was correct and the
80  * FW calculated correct packet sizes. The reason for the RL inaccuracy is
81  * unknown at this point.
82  */
83 #define QM_RL_INC_VAL(rate) \
84         OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
85         (8 * 100)), 1)
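/* Illustrative example: for a 25000 Mbps rate limit the increment value is
 * (25000 * 5 * 101) / (8 * 100) = 15781, i.e. 15625 bytes per 5 us period
 * scaled by the 1.01 compensation factor described above.
 */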
86
87 /* PF RL upper bound is set to 10 * the burst size of 1 ms at 50 Gbps */
88 #define QM_PF_RL_UPPER_BOUND            62500000
89
90 /* Max PF RL increment value is 0.7 * upper bound */
91 #define QM_PF_RL_MAX_INC_VAL            ((QM_PF_RL_UPPER_BOUND * 7) / 10)
92
93 /* Vport RL upper bound, link speed is in Mbps */
94 #define QM_VP_RL_UPPER_BOUND(speed) \
95         ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
96
97 /* Max Vport RL increment value is the Vport RL upper bound */
98 #define QM_VP_RL_MAX_INC_VAL(speed)     QM_VP_RL_UPPER_BOUND(speed)
99
100 /* Vport RL credit threshold in case of QM bypass */
101 #define QM_VP_RL_BYPASS_THRESH_SPEED    (QM_VP_RL_UPPER_BOUND(10000) - 1)
102
103 /* AFullOprtnstcCrdMask constants */
104 #define QM_OPPOR_LINE_VOQ_DEF           1
105 #define QM_OPPOR_FW_STOP_DEF            0
106 #define QM_OPPOR_PQ_EMPTY_DEF           1
107
108 /* Command Queue constants: */
109
110 /* Pure LB CmdQ lines (+spare) */
111 #define PBF_CMDQ_PURE_LB_LINES          150
112
113 #define PBF_CMDQ_LINES_E5_RSVD_RATIO    8
114
115 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
116         (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
117          ext_voq * \
118          (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
119           PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
120
121 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
122         (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
123          ext_voq * \
124          (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
125           PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
126
127 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
128 ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
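/* Illustrative example: for the 150 pure LB command queue lines above, the
 * line credit programmed is (150 - 4) * 2 = 292, ORed with
 * QM_LINE_CRD_REG_SIGN_BIT as required by the credit register format.
 */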
129
130 /* BTB: blocks constants (block size = 256B) */
131
132 /* 256B blocks in 9700B packet */
133 #define BTB_JUMBO_PKT_BLOCKS            38
134
135 /* Headroom per-port */
136 #define BTB_HEADROOM_BLOCKS             BTB_JUMBO_PKT_BLOCKS
137 #define BTB_PURE_LB_FACTOR              10
138
139 /* Factored by BTB_PURE_LB_FACTOR (hence really 0.7) */
140 #define BTB_PURE_LB_RATIO               7
141
142 /* QM stop command constants */
143 #define QM_STOP_PQ_MASK_WIDTH           32
144 #define QM_STOP_CMD_ADDR                2
145 #define QM_STOP_CMD_STRUCT_SIZE         2
146 #define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
147 #define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
148 #define QM_STOP_CMD_PAUSE_MASK_MASK     0xffffffff /* @DPDK */
149 #define QM_STOP_CMD_GROUP_ID_OFFSET     1
150 #define QM_STOP_CMD_GROUP_ID_SHIFT      16
151 #define QM_STOP_CMD_GROUP_ID_MASK       15
152 #define QM_STOP_CMD_PQ_TYPE_OFFSET      1
153 #define QM_STOP_CMD_PQ_TYPE_SHIFT       24
154 #define QM_STOP_CMD_PQ_TYPE_MASK        1
155 #define QM_STOP_CMD_MAX_POLL_COUNT      100
156 #define QM_STOP_CMD_POLL_PERIOD_US      500
157
158 /* QM command macros */
159 #define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
160 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
161         SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
162
163 #define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, \
164                            rl_valid, rl_id, voq, wrr) \
165         do { \
166                 OSAL_MEMSET(&(map), 0, sizeof(map)); \
167                 SET_FIELD(map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
168                 SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); \
169                 SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_ID, rl_id); \
170                 SET_FIELD(map.reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id); \
171                 SET_FIELD(map.reg, QM_RF_PQ_MAP_VOQ, voq); \
172                 SET_FIELD(map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr); \
173                 STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
174                              *((u32 *)&(map))); \
175         } while (0)
176
177 #define WRITE_PQ_INFO_TO_RAM            1
178 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
179         (((vp) << 0) | ((pf) << 12) | ((tc) << 16) |    \
180          ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
181 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
182         (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
183
184 /******************** INTERNAL IMPLEMENTATION *********************/
185
186 /* Returns the external VOQ number */
187 static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
188                             u8 port_id,
189                             u8 tc,
190                             u8 max_phys_tcs_per_port)
191 {
192         if (tc == PURE_LB_TC)
193                 return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
194         else
195                 return port_id * (max_phys_tcs_per_port) + tc;
196 }
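/* Illustrative example: with max_phys_tcs_per_port = 4, port 1 / TC 2 maps to
 * VOQ 1 * 4 + 2 = 6, while the pure LB TC of port 1 maps to
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1 in the dedicated pure LB VOQ range.
 */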
197
198 /* Prepare PF RL enable/disable runtime init values */
199 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
200 {
201         STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
202         if (pf_rl_en) {
203                 u8 num_ext_voqs = MAX_NUM_VOQS_E4;
204                 u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
205
206                 /* Enable RLs for all VOQs */
207                 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
208                              (u32)voq_bit_mask);
209 #ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
210                 if (num_ext_voqs >= 32)
211                         STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
212                                      (u32)(voq_bit_mask >> 32));
213 #endif
214
215                 /* Write RL period */
216                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
217                              QM_RL_PERIOD_CLK_25M);
218                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
219                              QM_RL_PERIOD_CLK_25M);
220
221                 /* Set credit threshold for QM bypass flow */
222                 if (QM_BYPASS_EN)
223                         STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
224                                      QM_PF_RL_UPPER_BOUND);
225         }
226 }
227
228 /* Prepare PF WFQ enable/disable runtime init values */
229 static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
230 {
231         STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
232
233         /* Set credit threshold for QM bypass flow */
234         if (pf_wfq_en && QM_BYPASS_EN)
235                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
236                              QM_WFQ_UPPER_BOUND);
237 }
238
239 /* Prepare VPORT RL enable/disable runtime init values */
240 static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
241 {
242         STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
243                      vport_rl_en ? 1 : 0);
244         if (vport_rl_en) {
245                 /* Write RL period (use timer 0 only) */
246                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
247                              QM_RL_PERIOD_CLK_25M);
248                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
249                              QM_RL_PERIOD_CLK_25M);
250
251                 /* Set credit threshold for QM bypass flow */
252                 if (QM_BYPASS_EN)
253                         STORE_RT_REG(p_hwfn,
254                                      QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
255                                      QM_VP_RL_BYPASS_THRESH_SPEED);
256         }
257 }
258
259 /* Prepare VPORT WFQ enable/disable runtime init values */
260 static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
261 {
262         STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
263                      vport_wfq_en ? 1 : 0);
264
265         /* Set credit threshold for QM bypass flow */
266         if (vport_wfq_en && QM_BYPASS_EN)
267                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
268                              QM_WFQ_UPPER_BOUND);
269 }
270
271 /* Prepare runtime init values to allocate PBF command queue lines for
272  * the specified VOQ
273  */
274 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
275                                          u8 ext_voq,
276                                          u16 cmdq_lines)
277 {
278         u32 qm_line_crd;
279
280         qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
281
282         OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
283                          (u32)cmdq_lines);
284         STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
285                          qm_line_crd);
286         STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
287                          qm_line_crd);
288 }
289
290 /* Prepare runtime init values to allocate PBF command queue lines. */
291 static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
292                                      u8 max_ports_per_engine,
293                                      u8 max_phys_tcs_per_port,
294                                      struct init_qm_port_params
295                                      port_params[MAX_NUM_PORTS])
296 {
297         u8 tc, ext_voq, port_id, num_tcs_in_port;
298         u8 num_ext_voqs = MAX_NUM_VOQS_E4;
299
300         /* Clear PBF lines of all VOQs */
301         for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
302                 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
303
304         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
305                 u16 phys_lines, phys_lines_per_tc;
306
307                 if (!port_params[port_id].active)
308                         continue;
309
310                 /* Find the number of command queue lines to divide between
311                  * the active physical TCs. In E5, 1/8 of the lines are
312                  * reserved. The lines for the pure LB TC are subtracted.
313                  */
314                 phys_lines = port_params[port_id].num_pbf_cmd_lines;
315                 phys_lines -= PBF_CMDQ_PURE_LB_LINES;
316
317                 /* Find #lines per active physical TC */
318                 num_tcs_in_port = 0;
319                 for (tc = 0; tc < max_phys_tcs_per_port; tc++)
320                         if (((port_params[port_id].active_phys_tcs >> tc) &
321                               0x1) == 1)
322                                 num_tcs_in_port++;
323                 phys_lines_per_tc = phys_lines / num_tcs_in_port;
324
325                 /* Init registers per active TC */
326                 for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
327                         ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
328                                                     max_phys_tcs_per_port);
329                         if (((port_params[port_id].active_phys_tcs >> tc) &
330                             0x1) == 1)
331                                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
332                                                              phys_lines_per_tc);
333                 }
334
335                 /* Init registers for pure LB TC */
336                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
337                                             max_phys_tcs_per_port);
338                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
339                                              PBF_CMDQ_PURE_LB_LINES);
340         }
341 }
342
343 /*
344  * Prepare runtime init values to allocate guaranteed BTB blocks for the
345  * specified port. The guaranteed BTB space is divided between the TCs as
346  * follows (shared space is currently not used):
347  * 1. Parameters:
348  *     B - BTB blocks for this port
349  *     C - Number of physical TCs for this port
350  * 2. Calculation:
351  *     a. 38 blocks (9700B jumbo frame) are allocated for global per port
352  *        headroom.
353  *     b. B = B - 38 (remainder after global headroom allocation).
354  *     c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
355  *     d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
356  *     e. B / C blocks are allocated for each physical TC.
357  * Assumptions:
358  * - MTU is up to 9700 bytes (38 blocks)
359  * - All TCs are considered symmetrical (same rate and packet size)
360  * - No optimization for lossy TC (all are considered lossless). Shared space is
361  *   not enabled and allocated for each TC.
362  */
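/* Illustrative example: for a port with C = 4 active physical TCs and
 * B = 470 usable blocks left after the 38-block headroom, the pure LB VOQ gets
 * MAX(38, (470 * 10) / (4 * 10 + 7)) = 100 blocks (the same integer arithmetic
 * used below via BTB_PURE_LB_FACTOR/RATIO), and each physical TC then gets
 * (470 - 100) / 4 = 92 blocks.
 */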
363 static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
364                                      u8 max_ports_per_engine,
365                                      u8 max_phys_tcs_per_port,
366                                      struct init_qm_port_params
367                                      port_params[MAX_NUM_PORTS])
368 {
369         u32 usable_blocks, pure_lb_blocks, phys_blocks;
370         u8 tc, ext_voq, port_id, num_tcs_in_port;
371
372         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
373                 if (!port_params[port_id].active)
374                         continue;
375
376                 /* Subtract headroom blocks */
377                 usable_blocks = port_params[port_id].num_btb_blocks -
378                                 BTB_HEADROOM_BLOCKS;
379
380                 /* Find blocks per physical TC. Use a factor to avoid
381                  * floating-point arithmetic.
382                  */
383                 num_tcs_in_port = 0;
384                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
385                         if (((port_params[port_id].active_phys_tcs >> tc) &
386                               0x1) == 1)
387                                 num_tcs_in_port++;
388
389                 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
390                                   (num_tcs_in_port * BTB_PURE_LB_FACTOR +
391                                    BTB_PURE_LB_RATIO);
392                 pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
393                                             pure_lb_blocks /
394                                             BTB_PURE_LB_FACTOR);
395                 phys_blocks = (usable_blocks - pure_lb_blocks) /
396                               num_tcs_in_port;
397
398                 /* Init physical TCs */
399                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
400                         if (((port_params[port_id].active_phys_tcs >> tc) &
401                              0x1) == 1) {
402                                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
403                                                          max_phys_tcs_per_port);
404                                 STORE_RT_REG(p_hwfn,
405                                         PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
406                                         phys_blocks);
407                         }
408                 }
409
410                 /* Init pure LB TC */
411                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
412                                             max_phys_tcs_per_port);
413                 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
414                              pure_lb_blocks);
415         }
416 }
417
418 /* Prepare Tx PQ mapping runtime init values for the specified PF */
419 static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
420                                     struct ecore_ptt *p_ptt,
421                                     u8 pf_id,
422                                     u8 max_phys_tcs_per_port,
423                                                 bool is_pf_loading,
424                                     u32 num_pf_cids,
425                                     u32 num_vf_cids,
426                                     u16 start_pq,
427                                     u16 num_pf_pqs,
428                                     u16 num_vf_pqs,
429                                     u8 start_vport,
430                                     u32 base_mem_addr_4kb,
431                                     struct init_qm_pq_params *pq_params,
432                                     struct init_qm_vport_params *vport_params)
433 {
434         /* A bit per Tx PQ indicating if the PQ is associated with a VF */
435         u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
436         u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
437         u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
438         u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
439
440         num_pqs = num_pf_pqs + num_vf_pqs;
441
442         first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
443         last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
444
445         pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
446         vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
447         mem_addr_4kb = base_mem_addr_4kb;
448
449         /* Set mapping from PQ group to PF */
450         for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
451                 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
452                              (u32)(pf_id));
453
454         /* Set PQ sizes */
455         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
456                      QM_PQ_SIZE_256B(num_pf_cids));
457         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
458                      QM_PQ_SIZE_256B(num_vf_cids));
459
460         /* Go over all Tx PQs */
461         for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
462                 u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
463                 u8 ext_voq, vport_id_in_pf;
464                 bool is_vf_pq, rl_valid;
465                 u16 first_tx_pq_id;
466
467                 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
468                                             pq_params[i].tc_id,
469                                             max_phys_tcs_per_port);
470                 is_vf_pq = (i >= num_pf_pqs);
471                 rl_valid = pq_params[i].rl_valid > 0;
472
473                 /* Update first Tx PQ of VPORT/TC */
474                 vport_id_in_pf = pq_params[i].vport_id - start_vport;
475                 first_tx_pq_id =
476                 vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
477                 if (first_tx_pq_id == QM_INVALID_PQ_ID) {
478                         u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
479                                        (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
480
481                         /* Create new VP PQ */
482                         vport_params[vport_id_in_pf].
483                             first_tx_pq_id[pq_params[i].tc_id] = pq_id;
484                         first_tx_pq_id = pq_id;
485
486                         /* Map VP PQ to VOQ and PF */
487                         STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
488                                      first_tx_pq_id, map_val);
489                 }
490
491                 /* Check RL ID */
492                 if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
493                         DP_NOTICE(p_hwfn, true,
494                                   "Invalid VPORT ID for rate limiter config\n");
495                         rl_valid = false;
496                 }
497
498                 /* Prepare PQ map entry */
499                 struct qm_rf_pq_map tx_pq_map;
500
501                 QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,
502                                   rl_valid ? 1 : 0,
503                                   rl_valid ? pq_params[i].vport_id : 0,
504                                   ext_voq, pq_params[i].wrr_group);
505
506                 /* Set PQ base address */
507                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
508                              mem_addr_4kb);
509
510                 /* Clear PQ pointer table entry (64 bit) */
511                 if (is_pf_loading)
512                         for (j = 0; j < 2; j++)
513                                 STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
514                                              (pq_id * 2) + j, 0);
515
516                 /* Write PQ info to RAM */
517                 if (WRITE_PQ_INFO_TO_RAM != 0) {
518                         u32 pq_info = 0;
519
520                         pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
521                                                   pq_params[i].tc_id,
522                                                   pq_params[i].port_id,
523                                                   rl_valid ? 1 : 0, rl_valid ?
524                                                   pq_params[i].vport_id : 0);
525                         ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
526                                  pq_info);
527                 }
528
529                 /* If VF PQ, add indication to PQ VF mask */
530                 if (is_vf_pq) {
531                         tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
532                                 (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
533                         mem_addr_4kb += vport_pq_mem_4kb;
534                 } else {
535                         mem_addr_4kb += pq_mem_4kb;
536                 }
537         }
538
539         /* Store Tx PQ VF mask to size select register */
540         for (i = 0; i < num_tx_pq_vf_masks; i++)
541                 if (tx_pq_vf_mask[i])
542                         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
543                                      i, tx_pq_vf_mask[i]);
544 }
545
546 /* Prepare Other PQ mapping runtime init values for the specified PF */
547 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
548                                        u8 pf_id,
549                                        bool is_pf_loading,
550                                        u32 num_pf_cids,
551                                        u32 num_tids,
552                                        u32 base_mem_addr_4kb)
553 {
554         u32 pq_size, pq_mem_4kb, mem_addr_4kb;
555         u16 i, j, pq_id, pq_group;
556
557         /* A single other PQ group is used in each PF, where PQ group i is used
558          * in PF i.
559          */
560         pq_group = pf_id;
561         pq_size = num_pf_cids + num_tids;
562         pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
563         mem_addr_4kb = base_mem_addr_4kb;
564
565         /* Map PQ group to PF */
566         STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
567                      (u32)(pf_id));
568
569         /* Set PQ sizes */
570         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
571                      QM_PQ_SIZE_256B(pq_size));
572
573         for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
574              i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
575                 /* Set PQ base address */
576                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
577                              mem_addr_4kb);
578
579                 /* Clear PQ pointer table entry */
580                 if (is_pf_loading)
581                         for (j = 0; j < 2; j++)
582                                 STORE_RT_REG(p_hwfn,
583                                              QM_REG_PTRTBLOTHER_RT_OFFSET +
584                                              (pq_id * 2) + j, 0);
585
586                 mem_addr_4kb += pq_mem_4kb;
587         }
588 }
589
590 /* Prepare PF WFQ runtime init values for the specified PF.
591  * Return -1 on error.
592  */
593 static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
594                                 u8 pf_id,
595                                 u16 pf_wfq,
596                                 u8 max_phys_tcs_per_port,
597                                 u16 num_tx_pqs,
598                                 struct init_qm_pq_params *pq_params)
599 {
600         u32 inc_val, crd_reg_offset;
601         u8 ext_voq;
602         u16 i;
603
604         inc_val = QM_WFQ_INC_VAL(pf_wfq);
605         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
606                 DP_NOTICE(p_hwfn, true,
607                           "Invalid PF WFQ weight configuration\n");
608                 return -1;
609         }
610
611         for (i = 0; i < num_tx_pqs; i++) {
612                 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
613                                             pq_params[i].tc_id,
614                                             max_phys_tcs_per_port);
615                 crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
616                                   QM_REG_WFQPFCRD_RT_OFFSET :
617                                   QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
618                                  ext_voq * MAX_NUM_PFS_BB +
619                                  (pf_id % MAX_NUM_PFS_BB);
620                 OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
621                                  (u32)QM_WFQ_CRD_REG_SIGN_BIT);
622         }
623
624         STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
625                      pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
626         STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
627
628         return 0;
629 }
630
631 /* Prepare PF RL runtime init values for the specified PF.
632  * Return -1 on error.
633  */
634 static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
635 {
636         u32 inc_val;
637
638         inc_val = QM_RL_INC_VAL(pf_rl);
639         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
640                 DP_NOTICE(p_hwfn, true,
641                           "Invalid PF rate limit configuration\n");
642                 return -1;
643         }
644
645         STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
646                      (u32)QM_RL_CRD_REG_SIGN_BIT);
647         STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
648                      QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
649         STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
650
651         return 0;
652 }
653
654 /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
655  * Return -1 on error.
656  */
657 static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
658                                 u8 num_vports,
659                                 struct init_qm_vport_params *vport_params)
660 {
661         u16 vport_pq_id;
662         u32 inc_val;
663         u8 tc, i;
664
665         /* Go over all PF VPORTs */
666         for (i = 0; i < num_vports; i++) {
667                 if (!vport_params[i].vport_wfq)
668                         continue;
669
670                 inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
671                 if (inc_val > QM_WFQ_MAX_INC_VAL) {
672                         DP_NOTICE(p_hwfn, true,
673                                   "Invalid VPORT WFQ weight configuration\n");
674                         return -1;
675                 }
676
677                 /* Each VPORT can have several VPORT PQ IDs for various TCs */
678                 for (tc = 0; tc < NUM_OF_TCS; tc++) {
679                         vport_pq_id = vport_params[i].first_tx_pq_id[tc];
680                         if (vport_pq_id != QM_INVALID_PQ_ID) {
681                                 STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
682                                              vport_pq_id,
683                                              (u32)QM_WFQ_CRD_REG_SIGN_BIT);
684                                 STORE_RT_REG(p_hwfn,
685                                              QM_REG_WFQVPWEIGHT_RT_OFFSET +
686                                              vport_pq_id, inc_val);
687                         }
688                 }
689         }
690         return 0;
691 }
692
693 /* Prepare VPORT RL runtime init values for the specified VPORTs.
694  * Return -1 on error.
695  */
696 static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
697                                   u8 start_vport,
698                                   u8 num_vports,
699                                   u32 link_speed,
700                                   struct init_qm_vport_params *vport_params)
701 {
702         u8 i, vport_id;
703         u32 inc_val;
704
705         if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
706                 DP_NOTICE(p_hwfn, true,
707                           "Invalid VPORT ID for rate limiter configuration\n");
708                 return -1;
709         }
710
711         /* Go over all PF VPORTs */
712         for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
713                 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
714                           vport_params[i].vport_rl : link_speed);
715                 if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
716                         DP_NOTICE(p_hwfn, true,
717                                   "Invalid VPORT rate-limit configuration\n");
718                         return -1;
719                 }
720
721                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
722                              (u32)QM_RL_CRD_REG_SIGN_BIT);
723                 STORE_RT_REG(p_hwfn,
724                              QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
725                              QM_VP_RL_UPPER_BOUND(link_speed) |
726                              (u32)QM_RL_CRD_REG_SIGN_BIT);
727                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
728                              inc_val);
729         }
730
731         return 0;
732 }
733
734 static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
735                                        struct ecore_ptt *p_ptt)
736 {
737         u32 reg_val, i;
738
739         for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
740              i++) {
741                 OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
742                 reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
743         }
744
745         /* Check for timeout while waiting for SDM command ready */
746         if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
747                 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
748                            "Timeout waiting for QM SDM cmd ready signal\n");
749                 return false;
750         }
751
752         return true;
753 }
754
755 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
756                               struct ecore_ptt *p_ptt,
757                                                           u32 cmd_addr,
758                                                           u32 cmd_data_lsb,
759                                                           u32 cmd_data_msb)
760 {
761         if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
762                 return false;
763
764         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
765         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
766         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
767         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
768         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
769
770         return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
771 }
772
773
774 /******************** INTERFACE IMPLEMENTATION *********************/
775
776 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
777                                                  u32 num_vf_cids,
778                                                  u32 num_tids,
779                                                  u16 num_pf_pqs,
780                                                  u16 num_vf_pqs)
781 {
782         return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
783             QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
784             QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
785 }
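/* Illustrative example (assumes QM_PQ_ELEMENT_SIZE is 4 bytes): a PF with
 * 8192 PF CIDs, 16384 VF CIDs, 4096 TIDs, 64 PF PQs and 128 VF PQs needs
 * 9 * 64 + 17 * 128 + 13 * 4 = 2804 4KB pages of PQ memory.
 */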
786
787 int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
788                             u8 max_ports_per_engine,
789                             u8 max_phys_tcs_per_port,
790                             bool pf_rl_en,
791                             bool pf_wfq_en,
792                             bool vport_rl_en,
793                             bool vport_wfq_en,
794                             struct init_qm_port_params
795                             port_params[MAX_NUM_PORTS])
796 {
797         u32 mask;
798
799         /* Init AFullOprtnstcCrdMask */
800         mask = (QM_OPPOR_LINE_VOQ_DEF <<
801                 QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
802                 (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
803                 (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
804                 (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
805                 (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
806                 (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
807                 (QM_OPPOR_FW_STOP_DEF <<
808                  QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
809                 (QM_OPPOR_PQ_EMPTY_DEF <<
810                  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
811         STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
812
813         /* Enable/disable PF RL */
814         ecore_enable_pf_rl(p_hwfn, pf_rl_en);
815
816         /* Enable/disable PF WFQ */
817         ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
818
819         /* Enable/disable VPORT RL */
820         ecore_enable_vport_rl(p_hwfn, vport_rl_en);
821
822         /* Enable/disable VPORT WFQ */
823         ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
824
825         /* Init PBF CMDQ line credit */
826         ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
827                                  max_phys_tcs_per_port, port_params);
828
829         /* Init BTB blocks in PBF */
830         ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
831                                  max_phys_tcs_per_port, port_params);
832
833         return 0;
834 }
835
836 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
837                         struct ecore_ptt *p_ptt,
838                         u8 pf_id,
839                         u8 max_phys_tcs_per_port,
840                         bool is_pf_loading,
841                         u32 num_pf_cids,
842                         u32 num_vf_cids,
843                         u32 num_tids,
844                         u16 start_pq,
845                         u16 num_pf_pqs,
846                         u16 num_vf_pqs,
847                         u8 start_vport,
848                         u8 num_vports,
849                         u16 pf_wfq,
850                         u32 pf_rl,
851                         u32 link_speed,
852                         struct init_qm_pq_params *pq_params,
853                         struct init_qm_vport_params *vport_params)
854 {
855         u32 other_mem_size_4kb;
856         u8 tc, i;
857
858         other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
859                              QM_OTHER_PQS_PER_PF;
860
861         /* Clear first Tx PQ ID array for each VPORT */
862         for (i = 0; i < num_vports; i++)
863                 for (tc = 0; tc < NUM_OF_TCS; tc++)
864                         vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
865
866         /* Map Other PQs (if any) */
867 #if QM_OTHER_PQS_PER_PF > 0
868         ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
869                                    num_tids, 0);
870 #endif
871
872         /* Map Tx PQs */
873         ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
874                                 is_pf_loading, num_pf_cids, num_vf_cids,
875                                 start_pq, num_pf_pqs, num_vf_pqs, start_vport,
876                                 other_mem_size_4kb, pq_params, vport_params);
877
878         /* Init PF WFQ */
879         if (pf_wfq)
880                 if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
881                                          max_phys_tcs_per_port,
882                                          num_pf_pqs + num_vf_pqs, pq_params))
883                         return -1;
884
885         /* Init PF RL */
886         if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
887                 return -1;
888
889         /* Set VPORT WFQ */
890         if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
891                 return -1;
892
893         /* Set VPORT RL */
894         if (ecore_vport_rl_rt_init
895             (p_hwfn, start_vport, num_vports, link_speed, vport_params))
896                 return -1;
897
898         return 0;
899 }
900
901 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
902                       struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
903 {
904         u32 inc_val;
905
906         inc_val = QM_WFQ_INC_VAL(pf_wfq);
907         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
908                 DP_NOTICE(p_hwfn, true,
909                           "Invalid PF WFQ weight configuration\n");
910                 return -1;
911         }
912
913         ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
914
915         return 0;
916 }
917
918 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
919                      struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
920 {
921         u32 inc_val;
922
923         inc_val = QM_RL_INC_VAL(pf_rl);
924         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
925                 DP_NOTICE(p_hwfn, true,
926                           "Invalid PF rate limit configuration\n");
927                 return -1;
928         }
929
930         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
931                  (u32)QM_RL_CRD_REG_SIGN_BIT);
932         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
933
934         return 0;
935 }
936
937 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
938                          struct ecore_ptt *p_ptt,
939                          u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
940 {
941         u16 vport_pq_id;
942         u32 inc_val;
943         u8 tc;
944
945         inc_val = QM_WFQ_INC_VAL(vport_wfq);
946         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
947                 DP_NOTICE(p_hwfn, true,
948                           "Invalid VPORT WFQ weight configuration\n");
949                 return -1;
950         }
951
952         for (tc = 0; tc < NUM_OF_TCS; tc++) {
953                 vport_pq_id = first_tx_pq_id[tc];
954                 if (vport_pq_id != QM_INVALID_PQ_ID) {
955                         ecore_wr(p_hwfn, p_ptt,
956                                  QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
957                 }
958         }
959
960         return 0;
961 }
962
963 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
964                         struct ecore_ptt *p_ptt, u8 vport_id,
965                                                 u32 vport_rl,
966                                                 u32 link_speed)
967 {
968         u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
969
970         if (vport_id >= max_qm_global_rls) {
971                 DP_NOTICE(p_hwfn, true,
972                           "Invalid VPORT ID for rate limiter configuration\n");
973                 return -1;
974         }
975
976         inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
977         if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
978                 DP_NOTICE(p_hwfn, true,
979                           "Invalid VPORT rate-limit configuration\n");
980                 return -1;
981         }
982
983         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
984                  (u32)QM_RL_CRD_REG_SIGN_BIT);
985         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
986
987         return 0;
988 }
989
990 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
991                             struct ecore_ptt *p_ptt,
992                             bool is_release_cmd,
993                             bool is_tx_pq, u16 start_pq, u16 num_pqs)
994 {
995         u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
996         u32 pq_mask = 0, last_pq, pq_id;
997
998         last_pq = start_pq + num_pqs - 1;
999
1000         /* Set command's PQ type */
1001         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
1002
1003         /* Go over requested PQs */
1004         for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
1005                 /* Set PQ bit in mask (stop command only) */
1006                 if (!is_release_cmd)
1007                         pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
1008
1009                 /* If last PQ or end of PQ mask, write command */
1010                 if ((pq_id == last_pq) ||
1011                     (pq_id % QM_STOP_PQ_MASK_WIDTH ==
1012                     (QM_STOP_PQ_MASK_WIDTH - 1))) {
1013                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
1014                                          pq_mask);
1015                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
1016                                          pq_id / QM_STOP_PQ_MASK_WIDTH);
1017                         if (!ecore_send_qm_cmd
1018                             (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
1019                              cmd_arr[1]))
1020                                 return false;
1021                         pq_mask = 0;
1022                 }
1023         }
1024
1025         return true;
1026 }
1027
1028
1029 /* NIG: ETS configuration constants */
1030 #define NIG_TX_ETS_CLIENT_OFFSET        4
1031 #define NIG_LB_ETS_CLIENT_OFFSET        1
1032 #define NIG_ETS_MIN_WFQ_BYTES           1600
1033
1034 /* NIG: ETS constants */
1035 #define NIG_ETS_UP_BOUND(weight, mtu) \
1036         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1037
1038 /* NIG: RL constants */
1039
1040 /* Byte base type value */
1041 #define NIG_RL_BASE_TYPE                1
1042
1043 /* Period in us */
1044 #define NIG_RL_PERIOD                   1
1045
1046 /* Period in 25MHz cycles */
1047 #define NIG_RL_PERIOD_CLK_25M           (25 * NIG_RL_PERIOD)
1048
1049 /* Rate is in Mbps */
1050 #define NIG_RL_INC_VAL(rate)            (((rate) * NIG_RL_PERIOD) / 8)
1051
1052 #define NIG_RL_MAX_VAL(inc_val, mtu) \
1053         (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
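/* Illustrative example: for a 10000 Mbps NIG rate limit the increment value is
 * (10000 * 1) / 8 = 1250 bytes per 1 us period, and with a 9700 byte MTU the
 * max value is 2 * MAX(1250, 9700) = 19400.
 */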
1054
1055 /* NIG: packet priority configuration constants */
1056 #define NIG_PRIORITY_MAP_TC_BITS        4
1057
1058
1059 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
1060                         struct ecore_ptt *p_ptt,
1061                         struct init_ets_req *req, bool is_lb)
1062 {
1063         u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
1064         u32 tc_bound_base_addr, tc_bound_addr_diff;
1065         u8 sp_tc_map = 0, wfq_tc_map = 0;
1066         u8 tc, num_tc, tc_client_offset;
1067
1068         num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
1069         tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
1070                                    NIG_TX_ETS_CLIENT_OFFSET;
1071         min_weight = 0xffffffff;
1072         tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1073                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1074         tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
1075                                       NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1076                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
1077                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1078         tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1079                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1080         tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
1081                                      NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1082                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
1083                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1084
1085         for (tc = 0; tc < num_tc; tc++) {
1086                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1087
1088                 /* Update SP map */
1089                 if (tc_req->use_sp)
1090                         sp_tc_map |= (1 << tc);
1091
1092                 if (!tc_req->use_wfq)
1093                         continue;
1094
1095                 /* Update WFQ map */
1096                 wfq_tc_map |= (1 << tc);
1097
1098                 /* Find minimal weight */
1099                 if (tc_req->weight < min_weight)
1100                         min_weight = tc_req->weight;
1101         }
1102
1103         /* Write SP map */
1104         ecore_wr(p_hwfn, p_ptt,
1105                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
1106                  NIG_REG_TX_ARB_CLIENT_IS_STRICT,
1107                  (sp_tc_map << tc_client_offset));
1108
1109         /* Write WFQ map */
1110         ecore_wr(p_hwfn, p_ptt,
1111                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
1112                  NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
1113                  (wfq_tc_map << tc_client_offset));
1114         /* Write WFQ weights */
1115         for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
1116                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1117                 u32 byte_weight;
1118
1119                 if (!tc_req->use_wfq)
1120                         continue;
1121
1122                 /* Translate weight to bytes */
1123                 byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1124                               min_weight;
1125
1126                 /* Write WFQ weight */
1127                 ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
1128                          tc_weight_addr_diff * tc_client_offset, byte_weight);
1129
1130                 /* Write WFQ upper bound */
1131                 ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
1132                          tc_bound_addr_diff * tc_client_offset,
1133                          NIG_ETS_UP_BOUND(byte_weight, req->mtu));
1134         }
1135 }
1136
1137 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
1138                           struct ecore_ptt *p_ptt,
1139                           struct init_nig_lb_rl_req *req)
1140 {
1141         u32 ctrl, inc_val, reg_offset;
1142         u8 tc;
1143
1144         /* Disable global MAC+LB RL */
1145         ctrl =
1146             NIG_RL_BASE_TYPE <<
1147             NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
1148         ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1149
1150         /* Configure and enable global MAC+LB RL */
1151         if (req->lb_mac_rate) {
1152                 /* Configure  */
1153                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
1154                          NIG_RL_PERIOD_CLK_25M);
1155                 inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
1156                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
1157                          inc_val);
1158                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
1159                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1160
1161                 /* Enable */
1162                 ctrl |=
1163                     1 <<
1164                     NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
1165                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1166         }
1167
1168         /* Disable global LB-only RL */
1169         ctrl =
1170             NIG_RL_BASE_TYPE <<
1171             NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
1172         ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1173
1174         /* Configure and enable global LB-only RL */
1175         if (req->lb_rate) {
1176                 /* Configure  */
1177                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
1178                          NIG_RL_PERIOD_CLK_25M);
1179                 inc_val = NIG_RL_INC_VAL(req->lb_rate);
1180                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
1181                          inc_val);
1182                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
1183                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1184
1185                 /* Enable */
1186                 ctrl |=
1187                     1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
1188                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1189         }
1190
1191         /* Per-TC RLs */
1192         for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
1193              tc++, reg_offset += 4) {
1194                 /* Disable TC RL */
1195                 ctrl =
1196                     NIG_RL_BASE_TYPE <<
1197                 NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
1198                 ecore_wr(p_hwfn, p_ptt,
1199                          NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1200
1201                 /* Configure and enable TC RL */
1202                 if (!req->tc_rate[tc])
1203                         continue;
1204
1205                 /* Configure */
1206                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
1207                          reg_offset, NIG_RL_PERIOD_CLK_25M);
1208                 inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
1209                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
1210                          reg_offset, inc_val);
1211                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
1212                          reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
1213
1214                 /* Enable */
1215                 ctrl |= 1 <<
1216                         NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
1217                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
1218                          reg_offset, ctrl);
1219         }
1220 }
1221
1222 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
1223                                struct ecore_ptt *p_ptt,
1224                                struct init_nig_pri_tc_map_req *req)
1225 {
1226         u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
1227         u32 pri_tc_mask = 0;
1228         u8 pri, tc;
1229
1230         for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
1231                 if (!req->pri[pri].valid)
1232                         continue;
1233
1234                 pri_tc_mask |= (req->pri[pri].tc_id <<
1235                                 (pri * NIG_PRIORITY_MAP_TC_BITS));
1236                 tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
1237         }
1238
1239         /* Write priority -> TC mask */
1240         ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
1241
1242         /* Write TC -> priority mask */
1243         for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
1244                 ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
1245                          tc_pri_mask[tc]);
1246                 ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
1247                          tc_pri_mask[tc]);
1248         }
1249 }
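/* Illustrative example: mapping priority 0 to TC 1 and priority 5 to TC 3
 * yields pri_tc_mask = (1 << 0) | (3 << 20) = 0x300001 (4 bits per priority),
 * while tc_pri_mask[1] = 0x01 and tc_pri_mask[3] = 0x20 are written to the
 * per-TC priority mask registers.
 */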
1250
1251
1252 /* PRS: ETS configuration constants */
1253 #define PRS_ETS_MIN_WFQ_BYTES           1600
1254 #define PRS_ETS_UP_BOUND(weight, mtu) \
1255         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1256
1257
1258 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
1259                         struct ecore_ptt *p_ptt, struct init_ets_req *req)
1260 {
1261         u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
1262         u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
1263
1264         tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
1265                               PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
1266         tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
1267                              PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
1268
1269         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1270                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1271
1272                 /* Update SP map */
1273                 if (tc_req->use_sp)
1274                         sp_tc_map |= (1 << tc);
1275
1276                 if (!tc_req->use_wfq)
1277                         continue;
1278
1279                 /* Update WFQ map */
1280                 wfq_tc_map |= (1 << tc);
1281
1282                 /* Find minimal weight */
1283                 if (tc_req->weight < min_weight)
1284                         min_weight = tc_req->weight;
1285         }
1286
1287         /* write SP map */
1288         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
1289
1290         /* write WFQ map */
1291         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
1292                  wfq_tc_map);
1293
1294         /* write WFQ weights */
1295         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1296                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1297                 u32 byte_weight;
1298
1299                 if (!tc_req->use_wfq)
1300                         continue;
1301
1302                 /* Translate weight to bytes */
1303                 byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1304                               min_weight;
1305
1306                 /* Write WFQ weight */
1307                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
1308                          tc_weight_addr_diff, byte_weight);
1309
1310                 /* Write WFQ upper bound */
1311                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
1312                          tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
1313                                                                    req->mtu));
1314         }
1315 }
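/* Worked example for the translation above: with two WFQ TCs of weights 1
 * and 2, min_weight is 1, so the byte weights written are 1 * 1600 = 1600
 * and 2 * 1600 = 3200. With an MTU of 9000, the upper bound written for each
 * TC is 2 * max(byte_weight, mtu) = 18000.
 */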
1316
1317
1318 /* BRB: RAM configuration constants */
1319 #define BRB_TOTAL_RAM_BLOCKS_BB 4800
1320 #define BRB_TOTAL_RAM_BLOCKS_K2 5632
1321 #define BRB_BLOCK_SIZE          128
1322 #define BRB_MIN_BLOCKS_PER_TC   9
1323 #define BRB_HYST_BYTES          10240
1324 #define BRB_HYST_BLOCKS         (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
1325
1326 /* Temporary big RAM allocation - should be updated */
1327 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1328                         struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
1329 {
1330         u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1331         u32 active_port_blocks, reg_offset = 0;
1332         u8 port, active_ports = 0;
1333
1334         tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
1335                                                BRB_BLOCK_SIZE);
1336         min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
1337                                                 BRB_BLOCK_SIZE);
1338         total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
1339                                                     BRB_TOTAL_RAM_BLOCKS_BB;
1340
1341         /* Find number of active ports */
1342         for (port = 0; port < MAX_NUM_PORTS; port++)
1343                 if (req->num_active_tcs[port])
1344                         active_ports++;
1345
1346         active_port_blocks = (u32)(total_blocks / active_ports);
1347
1348         for (port = 0; port < req->max_ports_per_engine; port++) {
1349                 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1350                 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1351                 u32 tc_guaranteed_blocks;
1352                 u8 tc;
1353
1354                 /* Calculate per-port sizes */
1355                 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
1356                                                          BRB_BLOCK_SIZE);
1357                 port_blocks = req->num_active_tcs[port] ? active_port_blocks :
1358                                                           0;
1359                 port_guaranteed_blocks = req->num_active_tcs[port] *
1360                                          tc_guaranteed_blocks;
1361                 port_shared_blocks = port_blocks - port_guaranteed_blocks;
1362                 full_xoff_th = req->num_active_tcs[port] *
1363                                BRB_MIN_BLOCKS_PER_TC;
1364                 full_xon_th = full_xoff_th + min_pkt_size_blocks;
1365                 pause_xoff_th = tc_headroom_blocks;
1366                 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
1367
1368                 /* Init total size per port */
1369                 ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
1370                          port_blocks);
1371
1372                 /* Init shared size per port */
1373                 ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
1374                          port_shared_blocks);
1375
1376                 for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1377                         /* Clear init values for non-active TCs */
1378                         if (tc == req->num_active_tcs[port]) {
1379                                 tc_guaranteed_blocks = 0;
1380                                 full_xoff_th = 0;
1381                                 full_xon_th = 0;
1382                                 pause_xoff_th = 0;
1383                                 pause_xon_th = 0;
1384                         }
1385
1386                         /* Init guaranteed size per TC */
1387                         ecore_wr(p_hwfn, p_ptt,
1388                                  BRB_REG_TC_GUARANTIED_0 + reg_offset,
1389                                  tc_guaranteed_blocks);
1390                         ecore_wr(p_hwfn, p_ptt,
1391                                  BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
1392                                  BRB_HYST_BLOCKS);
1393
1394                         /* Init pause/full thresholds per physical TC - for
1395                          * loopback traffic.
1396                          */
1397                         ecore_wr(p_hwfn, p_ptt,
1398                                  BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
1399                                  reg_offset, full_xoff_th);
1400                         ecore_wr(p_hwfn, p_ptt,
1401                                  BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
1402                                  reg_offset, full_xon_th);
1403                         ecore_wr(p_hwfn, p_ptt,
1404                                  BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
1405                                  reg_offset, pause_xoff_th);
1406                         ecore_wr(p_hwfn, p_ptt,
1407                                  BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
1408                                  reg_offset, pause_xon_th);
1409
1410                         /* Init pause/full thresholds per physical TC - for
1411                          * main traffic.
1412                          */
1413                         ecore_wr(p_hwfn, p_ptt,
1414                                  BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
1415                                  reg_offset, full_xoff_th);
1416                         ecore_wr(p_hwfn, p_ptt,
1417                                  BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
1418                                  reg_offset, full_xon_th);
1419                         ecore_wr(p_hwfn, p_ptt,
1420                                  BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
1421                                  reg_offset, pause_xoff_th);
1422                         ecore_wr(p_hwfn, p_ptt,
1423                                  BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
1424                                  reg_offset, pause_xon_th);
1425                 }
1426         }
1427 }
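/* Worked example for the sizing above: with BRB_BLOCK_SIZE of 128 bytes, a
 * headroom_per_tc of 10240 bytes maps to 80 headroom blocks and a
 * min_pkt_size of 9700 bytes to 76 blocks (rounded up). On a K2 device with
 * two active ports, each active port gets 5632 / 2 = 2816 total blocks.
 */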
1428
1429 /* In MF mode, should be called once per port to set the OuterTag EtherType */
1430 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1431 {
1432         /* Update DORQ register */
1433         STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
1434 }
1435
1436 #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
1437 (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
1438 #define PRS_ETH_TUNN_OUTPUT_FORMAT        -188897008
1439 #define PRS_ETH_OUTPUT_FORMAT             -46832
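/* Illustrative example of the macro above:
 * SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 3, true) sets bit 3 of reg_val and
 * SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 3, false) clears it; the result is
 * assigned back to reg_val.
 */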
1440
1441 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
1442                                struct ecore_ptt *p_ptt, u16 dest_port)
1443 {
1444         /* Update PRS register */
1445         ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1446
1447         /* Update NIG register */
1448         ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1449
1450         /* Update PBF register */
1451         ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1452 }
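/* Illustrative usage, assuming the standard IANA-assigned VXLAN UDP port:
 *     ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
 */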
1453
1454 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
1455                             struct ecore_ptt *p_ptt, bool vxlan_enable)
1456 {
1457         u32 reg_val;
1458
1459         /* Update PRS register */
1460         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1461         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1462                            PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
1463                            vxlan_enable);
1464         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1465         if (reg_val) { /* TODO: handle E5 init */
1466                 reg_val = ecore_rd(p_hwfn, p_ptt,
1467                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1468
1469                 /* Update output only if tunnel blocks are not included. */
1470                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1471                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1472                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1473         }
1474
1475         /* Update NIG register */
1476         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1477         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1478                                    NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
1479                                    vxlan_enable);
1480         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1481
1482         /* Update DORQ register */
1483         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
1484                  vxlan_enable ? 1 : 0);
1485 }
1486
1487 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
1488                           struct ecore_ptt *p_ptt,
1489                           bool eth_gre_enable, bool ip_gre_enable)
1490 {
1491         u32 reg_val;
1492
1493         /* Update PRS register */
1494         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1495         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1496                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
1497                    eth_gre_enable);
1498         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1499                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
1500                    ip_gre_enable);
1501         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1502         if (reg_val) { /* TODO: handle E5 init */
1503                 reg_val = ecore_rd(p_hwfn, p_ptt,
1504                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1505
1506                 /* Update output only if tunnel blocks are not included. */
1507                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1508                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1509                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1510         }
1511
1512         /* Update NIG register */
1513         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1514         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1515                    NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
1516                    eth_gre_enable);
1517         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1518                    NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
1519                    ip_gre_enable);
1520         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1521
1522         /* Update DORQ registers */
1523         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
1524                  eth_gre_enable ? 1 : 0);
1525         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
1526                  ip_gre_enable ? 1 : 0);
1527 }
1528
1529 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
1530                                 struct ecore_ptt *p_ptt, u16 dest_port)
1531 {
1532         /* Update PRS register */
1533         ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
1534
1535         /* Update NIG register */
1536         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
1537
1538         /* Update PBF register */
1539         ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
1540 }
1541
1542 void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
1543                              struct ecore_ptt *p_ptt,
1544                              bool eth_geneve_enable, bool ip_geneve_enable)
1545 {
1546         u32 reg_val;
1547
1548         /* Update PRS register */
1549         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1550         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1551                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
1552                    eth_geneve_enable);
1553         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1554                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
1555                    ip_geneve_enable);
1556         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1557         if (reg_val) { /* TODO: handle E5 init */
1558                 reg_val = ecore_rd(p_hwfn, p_ptt,
1559                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1560
1561                 /* Update output only if tunnel blocks are not included. */
1562                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1563                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1564                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1565         }
1566
1567         /* Update NIG register */
1568         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
1569                  eth_geneve_enable ? 1 : 0);
1570         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
1571                  ip_geneve_enable ? 1 : 0);
1572
1573         /* EDPM with geneve tunnel not supported in BB */
1574         if (ECORE_IS_BB_B0(p_hwfn->p_dev))
1575                 return;
1576
1577         /* Update DORQ registers */
1578         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
1579                  eth_geneve_enable ? 1 : 0);
1580         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
1581                  ip_geneve_enable ? 1 : 0);
1582 }
1583
1584 #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET   4
1585 #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT      -927094512
1586
1587 void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
1588                                   struct ecore_ptt *p_ptt,
1589                                   bool enable)
1590 {
1591         u32 reg_val, cfg_mask;
1592
1593         /* read PRS config register */
1594         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
1595
1596         /* set VXLAN_NO_L2_ENABLE mask */
1597         cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
1598
1599         if (enable) {
1600                 /* set VXLAN_NO_L2_ENABLE flag */
1601                 reg_val |= cfg_mask;
1602
1603                 /* update PRS FIC register */
1604                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1605                  (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
1606         } else {
1607                 /* clear VXLAN_NO_L2_ENABLE flag */
1608                 reg_val &= ~cfg_mask;
1609         }
1610
1611         /* write PRS config register */
1612         ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
1613 }
1614
1615 #define T_ETH_PACKET_ACTION_GFT_EVENTID  23
1616 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
1617 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1618 #define PARSER_ETH_CONN_CM_HDR 0
1619 #define CAM_LINE_SIZE sizeof(u32)
1620 #define RAM_LINE_SIZE sizeof(u64)
1621 #define REG_SIZE sizeof(u32)
1622
1623 void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
1624                        struct ecore_ptt *p_ptt,
1625                        u16 pf_id)
1626 {
1627         /* Disable GFT search for PF */
1628         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1629
1630         /* Clean RAM & CAM for next GFT session */
1631
1632         /* Zero camline */
1633         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
1634
1635         /* Zero ramline */
1636         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1637                                 RAM_LINE_SIZE * pf_id, 0);
1638         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1639                                 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
1640 }
1641
1642
1643 void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
1644                                    struct ecore_ptt *p_ptt)
1645 {
1646         u32 rfs_cm_hdr_event_id;
1647
1648         /* Set RFS event ID to be awakened in Tstorm by PRS */
1649         rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1650         rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
1651             PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1652         rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
1653             PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1654         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1655 }
1656
1657 void ecore_gft_config(struct ecore_hwfn *p_hwfn,
1658                                struct ecore_ptt *p_ptt,
1659                                u16 pf_id,
1660                                bool tcp,
1661                                bool udp,
1662                                bool ipv4,
1663                                bool ipv6,
1664                                enum gft_profile_type profile_type)
1665 {
1666         u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
1667
1668         if (!ipv6 && !ipv4)
1669                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
1670         if (!tcp && !udp)
1671                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
1672         if (profile_type >= MAX_GFT_PROFILE_TYPE)
1673                 DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
1674
1675         /* Set RFS event ID to be awakened in Tstorm by PRS */
1676         reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
1677                   PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1678         reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1679         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
1680
1681         /* Do not load context, only cid, in PRS on match. */
1682         ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1683
1684         /* Do not use tenant ID exist bit for GFT search */
1685         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
1686
1687         /* Set Cam */
1688         cam_line = 0;
1689         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
1690
1691         /* Filters are per PF!! */
1692         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
1693                   GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
1694         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
1695
1696         if (!(tcp && udp)) {
1697                 SET_FIELD(cam_line,
1698                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
1699                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
1700                 if (tcp)
1701                         SET_FIELD(cam_line,
1702                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1703                                   GFT_PROFILE_TCP_PROTOCOL);
1704                 else
1705                         SET_FIELD(cam_line,
1706                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1707                                   GFT_PROFILE_UDP_PROTOCOL);
1708         }
1709
1710         if (!(ipv4 && ipv6)) {
1711                 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
1712                 if (ipv4)
1713                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1714                                   GFT_PROFILE_IPV4);
1715                 else
1716                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1717                                   GFT_PROFILE_IPV6);
1718         }
1719
1720         /* Write characteristics to cam */
1721         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
1722                  cam_line);
1723         cam_line = ecore_rd(p_hwfn, p_ptt,
1724                             PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
1725
1726         /* Write line to RAM - compare to filter 4-tuple */
1727         ram_line_lo = 0;
1728         ram_line_hi = 0;
1729
1730         /* Search non-IP as GFT */
1731         search_non_ip_as_gft = 0;
1732
1733         /* Tunnel type */
1734         SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
1735         SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
1736
1737         if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
1738                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1739                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1740                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1741                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1742                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
1743                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1744         } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
1745                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1746                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1747                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1748         } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
1749                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1750                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1751         } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
1752                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1753                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1754         } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
1755                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
1756
1757                 /* Allow tunneled traffic without inner IP */
1758                 search_non_ip_as_gft = 1;
1759         }
1760
1761         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
1762                  search_non_ip_as_gft);
1763         ecore_wr(p_hwfn, p_ptt,
1764                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1765                  ram_line_lo);
1766         ecore_wr(p_hwfn, p_ptt,
1767                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
1768                  REG_SIZE, ram_line_hi);
1769
1770         /* Set default profile so that no filter match will happen */
1771         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1772                  PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
1773         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1774                  PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
1775
1776         /* Enable gft search */
1777         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
1778 }
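/* Illustrative usage of the function above: steering TCP-over-IPv4 flows by
 * their full 4-tuple for a given PF could look like
 *     ecore_gft_config(p_hwfn, p_ptt, pf_id, true, false, true, false,
 *                      GFT_PROFILE_TYPE_4_TUPLE);
 */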
1779
1780 /* Configure VF zone size mode */
1781 void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
1782                                     struct ecore_ptt *p_ptt, u16 mode,
1783                                     bool runtime_init)
1784 {
1785         u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
1786         u32 msdm_vf_offset_mask;
1787
1788         if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
1789                 msdm_vf_size_log += 1;
1790         else if (mode == VF_ZONE_SIZE_MODE_QUAD)
1791                 msdm_vf_size_log += 2;
1792
1793         msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
1794
1795         if (runtime_init) {
1796                 STORE_RT_REG(p_hwfn,
1797                              PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
1798                              msdm_vf_size_log);
1799                 STORE_RT_REG(p_hwfn,
1800                              PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
1801                              msdm_vf_offset_mask);
1802         } else {
1803                 ecore_wr(p_hwfn, p_ptt,
1804                          PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
1805                 ecore_wr(p_hwfn, p_ptt,
1806                          PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
1807         }
1808 }
1809
1810 /* Get mstorm statistics for offset by VF zone size mode */
1811 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
1812                                        u16 stat_cnt_id,
1813                                        u16 vf_zone_size_mode)
1814 {
1815         u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1816
1817         if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
1818             (stat_cnt_id > MAX_NUM_PFS)) {
1819                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1820                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1821                             (stat_cnt_id - MAX_NUM_PFS);
1822                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1823                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1824                             (stat_cnt_id - MAX_NUM_PFS);
1825         }
1826
1827         return offset;
1828 }
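/* Worked example for the offset calculation above, assuming
 * MSTORM_VF_ZONE_DEFAULT_SIZE_LOG is 7: in VF_ZONE_SIZE_MODE_DOUBLE, a
 * stat_cnt_id of MAX_NUM_PFS + 2 adds (1 << 7) * 2 = 256 bytes to the
 * default offset; in VF_ZONE_SIZE_MODE_QUAD it adds 3 * (1 << 7) * 2 = 768.
 */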
1829
1830 /* Get mstorm VF producer offset by VF zone size mode */
1831 u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
1832                                          u8 vf_id,
1833                                          u8 vf_queue_id,
1834                                          u16 vf_zone_size_mode)
1835 {
1836         u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
1837
1838         if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
1839                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1840                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1841                                    vf_id;
1842                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1843                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1844                                   vf_id;
1845         }
1846
1847         return offset;
1848 }
1849
1850 #ifndef LINUX_REMOVE
1851 #define CRC8_INIT_VALUE 0xFF
1852 #endif
1853 static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
1854
1855 /* Calculate and return CDU validation byte per connection type / region /
1856  * cid
1857  */
1858 static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
1859 {
1860         const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
1861
1862         static u8 crc8_table_valid;     /* automatically initialized to 0 */
1863         u8 crc, validation_byte = 0;
1864         u32 validation_string = 0;
1865         u32 data_to_crc;
1866
1867         if (crc8_table_valid == 0) {
1868                 OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
1869                 crc8_table_valid = 1;
1870         }
1871
1872         /*
1873          * The CRC is calculated on the String-to-compress:
1874          * [31:8]  = {CID[31:20],CID[11:0]}
1875          * [7:4]   = Region
1876          * [3:0]   = Type
1877          */
1878         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
1879                 validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
1880
1881         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
1882                 validation_string |= ((region & 0xF) << 4);
1883
1884         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
1885                 validation_string |= (conn_type & 0xF);
1886
1887         /* Convert to big-endian and calculate CRC8 */
1888         data_to_crc = OSAL_BE32_TO_CPU(validation_string);
1889
1890         crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
1891                         CRC8_INIT_VALUE);
1892
1893         /* The validation byte [7:0] is composed as follows:
1894          * for type A validation
1895          * [7]          = active configuration bit
1896          * [6:0]        = crc[6:0]
1897          *
1898          * for type B validation
1899          * [7]          = active configuration bit
1900          * [6:3]        = connection_type[3:0]
1901          * [2:0]        = crc[2:0]
1902          */
1903
1904         validation_byte |= ((validation_cfg >>
1905                              CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
1906
1907         if ((validation_cfg >>
1908              CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
1909                 validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
1910         else
1911                 validation_byte |= crc & 0x7F;
1912
1913         return validation_byte;
1914 }
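/* Worked example of the calculation above: with cid = 0x00123456, region = 3,
 * conn_type = 1 and all USE_* bits set in the validation configuration, the
 * string-to-compress is (0x00123456 & 0xFFF00000) |
 * ((0x00123456 & 0xFFF) << 8) | (3 << 4) | 1 = 0x00145631, which is then
 * byte-swapped and fed to the CRC8.
 */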
1915
1916 /* Calculate and set validation bytes for session context */
1917 void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
1918                                        u8 ctx_type, u32 cid)
1919 {
1920         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1921
1922         p_ctx = (u8 *)p_ctx_mem;
1923         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1924         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1925         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1926
1927         OSAL_MEMSET(p_ctx, 0, ctx_size);
1928
1929         *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
1930         *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
1931         *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
1932 }
1933
1934 /* Calculate and set validation bytes for task context */
1935 void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
1936                                     u32 tid)
1937 {
1938         u8 *p_ctx, *region1_val_ptr;
1939
1940         p_ctx = (u8 *)p_ctx_mem;
1941         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1942
1943         OSAL_MEMSET(p_ctx, 0, ctx_size);
1944
1945         *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
1946 }
1947
1948 /* Memset session context to 0 while preserving validation bytes */
1949 void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1950 {
1951         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1952         u8 x_val, t_val, u_val;
1953
1954         p_ctx = (u8 *)p_ctx_mem;
1955         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1956         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1957         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1958
1959         x_val = *x_val_ptr;
1960         t_val = *t_val_ptr;
1961         u_val = *u_val_ptr;
1962
1963         OSAL_MEMSET(p_ctx, 0, ctx_size);
1964
1965         *x_val_ptr = x_val;
1966         *t_val_ptr = t_val;
1967         *u_val_ptr = u_val;
1968 }
1969
1970 /* Memset task context to 0 while preserving validation bytes */
1971 void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1972 {
1973         u8 *p_ctx, *region1_val_ptr;
1974         u8 region1_val;
1975
1976         p_ctx = (u8 *)p_ctx_mem;
1977         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1978
1979         region1_val = *region1_val_ptr;
1980
1981         OSAL_MEMSET(p_ctx, 0, ctx_size);
1982
1983         *region1_val_ptr = region1_val;
1984 }
1985
1986 /* Enable and configure context validation */
1987 void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
1988                                      struct ecore_ptt *p_ptt)
1989 {
1990         u32 ctx_validation;
1991
1992         /* Enable validation for connection region 3 - bits [31:24] */
1993         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
1994         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
1995
1996         /* Enable validation for connection region 5 - bits [15: 8] */
1997         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1998         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
1999
2000         /* Enable validation for connection region 1 - bits [15: 8] */
2001         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
2002         ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
2003 }
2004
2005
2006 /*******************************************************************************
2007  * File name : rdma_init.c
2008  * Author    : Michael Shteinbok
2009  *******************************************************************************
2010  *******************************************************************************
2011  * Description:
2012  * RDMA HSI functions
2013  *
2014  *******************************************************************************
2015  * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
2016  *
2017  *******************************************************************************
2018  */
2019 static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
2020                                           u8 storm_id)
2021 {
2022         switch (storm_id) {
2023         case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2024                        TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2025         case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2026                        MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2027         case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2028                        USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2029         case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2030                        XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2031         case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2032                        YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2033         case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2034                        PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2035
2036         default: return 0;
2037         }
2038 }
2039
2040 void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
2041                                 struct ecore_ptt *p_ptt,
2042                                 u8 assert_level[NUM_STORMS])
2043 {
2044         u8 storm_id;
2045         for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
2046                 u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);
2047
2048                 ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
2049         }
2050 }
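/* Illustrative usage of the function above, taking 1 as an example assert
 * level for all storms:
 *     u8 levels[NUM_STORMS];
 *     OSAL_MEMSET(levels, 1, sizeof(levels));
 *     ecore_set_rdma_error_level(p_hwfn, p_ptt, levels);
 */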