[dpdk.git] drivers/net/qede/base/ecore_init_fw_funcs.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include "bcm_osal.h"
8 #include "ecore_hw.h"
9 #include "ecore_init_ops.h"
10 #include "reg_addr.h"
11 #include "ecore_rt_defs.h"
12 #include "ecore_hsi_common.h"
13 #include "ecore_hsi_init_func.h"
14 #include "ecore_hsi_eth.h"
15 #include "ecore_hsi_init_tool.h"
16 #include "ecore_iro.h"
17 #include "ecore_init_fw_funcs.h"
18
19 #define CDU_VALIDATION_DEFAULT_CFG 61
20
21 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
22         { 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */
23         { 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */
24         { 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */
25 };
26 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
27         { 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
28 };
29
30 /* General constants */
31 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
32                                 QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
33 #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
34                                   0)
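/* Worked example of the two macros above, assuming QM_PQ_ELEMENT_SIZE is
 * 4 bytes (value taken from the ecore headers, not shown here): a PQ with
 * 1000 CIDs needs QM_PQ_MEM_4KB(1000) = DIV_ROUND_UP((1000 + 1) * 4, 0x1000)
 * = 1 page of 4KB, and its size register value is QM_PQ_SIZE_256B(1000) =
 * DIV_ROUND_UP(1000, 0x100) - 1 = 3.
 */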
35 #define QM_INVALID_PQ_ID                0xffff
36
37 /* Feature enable */
38 #define QM_BYPASS_EN                    1
39 #define QM_BYTE_CRD_EN                  1
40
41 /* Other PQ constants */
42 #define QM_OTHER_PQS_PER_PF             4
43
44 /* VOQ constants */
45 #define QM_E5_NUM_EXT_VOQ               (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
46
47 /* WFQ constants: */
48
49 /* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
50 #define QM_WFQ_UPPER_BOUND              62500000
51
52 /* Bit of VOQ in WFQ VP PQ map */
53 #define QM_WFQ_VP_PQ_VOQ_SHIFT          0
54
55 /* Bit of PF in WFQ VP PQ map */
56 #define QM_WFQ_VP_PQ_PF_E4_SHIFT        5
57 #define QM_WFQ_VP_PQ_PF_E5_SHIFT        6
58
59 /* 0x9000 = 4*9*1024 */
60 #define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
61
62 /* Max WFQ increment value is 0.7 * upper bound */
63 #define QM_WFQ_MAX_INC_VAL              ((QM_WFQ_UPPER_BOUND * 7) / 10)
64
65 /* Number of VOQs in E5 QmWfqCrd register */
66 #define QM_WFQ_CRD_E5_NUM_VOQS          16
67
68 /* RL constants: */
69
70 /* Period in us */
71 #define QM_RL_PERIOD                    5
72
73 /* Period in 25MHz cycles */
74 #define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
75
76 /* RL increment value - rate is specified in Mbps. The factor of 1.01 was
77  * added after seeing only a 99% rate reached on a 25Gbps port in a DPDK RFC
78  * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
79  * although the credit increment value was correct and the FW calculated
80  * correct packet sizes. The reason for the RL inaccuracy is unknown at this
81  * point.
82  */
83 #define QM_RL_INC_VAL(rate) \
84         OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
85         (8 * 100)), 1)
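/* Illustrative calculation for the macro above: a rate of 25000 Mbps gives
 * QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / 800 = 15781, roughly 1.01 times
 * the bytes that can be sent in one 5 us period. A rate of 0 falls back to
 * the 100000 Mbps default, giving 63125.
 */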
86
87 /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
88 #define QM_PF_RL_UPPER_BOUND            62500000
89
90 /* Max PF RL increment value is 0.7 * upper bound */
91 #define QM_PF_RL_MAX_INC_VAL            ((QM_PF_RL_UPPER_BOUND * 7) / 10)
92
93 /* Vport RL Upper bound, link speed is in Mbps */
94 #define QM_VP_RL_UPPER_BOUND(speed) \
95         ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
96
97 /* Max Vport RL increment value is the Vport RL upper bound */
98 #define QM_VP_RL_MAX_INC_VAL(speed)     QM_VP_RL_UPPER_BOUND(speed)
99
100 /* Vport RL credit threshold in case of QM bypass */
101 #define QM_VP_RL_BYPASS_THRESH_SPEED    (QM_VP_RL_UPPER_BOUND(10000) - 1)
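/* Illustrative calculation: for a 10000 Mbps link, QM_RL_INC_VAL(10000) =
 * (10000 * 5 * 101) / 800 = 6312, so QM_VP_RL_UPPER_BOUND(10000) =
 * MAX(6312, 9700 + 1000) = 10700 and the bypass threshold above evaluates
 * to 10699.
 */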
102
103 /* AFullOprtnstcCrdMask constants */
104 #define QM_OPPOR_LINE_VOQ_DEF           1
105 #define QM_OPPOR_FW_STOP_DEF            0
106 #define QM_OPPOR_PQ_EMPTY_DEF           1
107
108 /* Command Queue constants: */
109
110 /* Pure LB CmdQ lines (+spare) */
111 #define PBF_CMDQ_PURE_LB_LINES          150
112
113 #define PBF_CMDQ_LINES_E5_RSVD_RATIO    8
114
115 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
116         (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
117          ext_voq * \
118          (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
119           PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
120
121 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
122         (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
123          ext_voq * \
124          (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
125           PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
126
127 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
128 ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
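/* For example, the pure LB allocation of PBF_CMDQ_PURE_LB_LINES (150) lines
 * yields a line credit of (150 - 4) * 2 = 292, OR'd with
 * QM_LINE_CRD_REG_SIGN_BIT (defined elsewhere in the ecore headers).
 */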
129
130 /* BTB: blocks constants (block size = 256B) */
131
132 /* 256B blocks in 9700B packet */
133 #define BTB_JUMBO_PKT_BLOCKS            38
134
135 /* Headroom per-port */
136 #define BTB_HEADROOM_BLOCKS             BTB_JUMBO_PKT_BLOCKS
137 #define BTB_PURE_LB_FACTOR              10
138
139 /* Factored by BTB_PURE_LB_FACTOR (hence really 0.7) */
140 #define BTB_PURE_LB_RATIO               7
141
142 /* QM stop command constants */
143 #define QM_STOP_PQ_MASK_WIDTH           32
144 #define QM_STOP_CMD_ADDR                2
145 #define QM_STOP_CMD_STRUCT_SIZE         2
146 #define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
147 #define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
148 #define QM_STOP_CMD_PAUSE_MASK_MASK     0xffffffff /* @DPDK */
149 #define QM_STOP_CMD_GROUP_ID_OFFSET     1
150 #define QM_STOP_CMD_GROUP_ID_SHIFT      16
151 #define QM_STOP_CMD_GROUP_ID_MASK       15
152 #define QM_STOP_CMD_PQ_TYPE_OFFSET      1
153 #define QM_STOP_CMD_PQ_TYPE_SHIFT       24
154 #define QM_STOP_CMD_PQ_TYPE_MASK        1
155 #define QM_STOP_CMD_MAX_POLL_COUNT      100
156 #define QM_STOP_CMD_POLL_PERIOD_US      500
157
158 /* QM command macros */
159 #define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
160 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
161         SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
162
163 #define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, \
164                            rl_valid, rl_id, voq, wrr) \
165         do { \
166                 OSAL_MEMSET(&(map), 0, sizeof(map)); \
167                 SET_FIELD(map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
168                 SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); \
169                 SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_ID, rl_id); \
170                 SET_FIELD(map.reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id); \
171                 SET_FIELD(map.reg, QM_RF_PQ_MAP_VOQ, voq); \
172                 SET_FIELD(map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr); \
173                 STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
174                              *((u32 *)&(map))); \
175         } while (0)
176
177 #define WRITE_PQ_INFO_TO_RAM            1
178 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
179         (((vp) << 0) | ((pf) << 12) | ((tc) << 16) |    \
180          ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
181 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
182         (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
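/* Field layout implied by PQ_INFO_ELEMENT above: vp occupies bits 0..11,
 * pf bits 12..15, tc bits 16..19, port bits 20..21, rl_valid bit 22 and rl
 * starts at bit 24; consecutive PQ info entries are 4 bytes apart in the
 * XSEM internal RAM.
 */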
183
184 /******************** INTERNAL IMPLEMENTATION *********************/
185
186 /* Returns the external VOQ number */
187 static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
188                             u8 port_id,
189                             u8 tc,
190                             u8 max_phys_tcs_per_port)
191 {
192         if (tc == PURE_LB_TC)
193                 return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
194         else
195                 return port_id * (max_phys_tcs_per_port) + tc;
196 }
197
198 /* Prepare PF RL enable/disable runtime init values */
199 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
200 {
201         STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
202         if (pf_rl_en) {
203                 u8 num_ext_voqs = MAX_NUM_VOQS_E4;
204                 u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
205
206                 /* Enable RLs for all VOQs */
207                 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
208                              (u32)voq_bit_mask);
209 #ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
210                 if (num_ext_voqs >= 32)
211                         STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
212                                      (u32)(voq_bit_mask >> 32));
213 #endif
214
215                 /* Write RL period */
216                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
217                              QM_RL_PERIOD_CLK_25M);
218                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
219                              QM_RL_PERIOD_CLK_25M);
220
221                 /* Set credit threshold for QM bypass flow */
222                 if (QM_BYPASS_EN)
223                         STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
224                                      QM_PF_RL_UPPER_BOUND);
225         }
226 }
227
228 /* Prepare PF WFQ enable/disable runtime init values */
229 static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
230 {
231         STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
232
233         /* Set credit threshold for QM bypass flow */
234         if (pf_wfq_en && QM_BYPASS_EN)
235                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
236                              QM_WFQ_UPPER_BOUND);
237 }
238
239 /* Prepare VPORT RL enable/disable runtime init values */
240 static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
241 {
242         STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
243                      vport_rl_en ? 1 : 0);
244         if (vport_rl_en) {
245                 /* Write RL period (use timer 0 only) */
246                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
247                              QM_RL_PERIOD_CLK_25M);
248                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
249                              QM_RL_PERIOD_CLK_25M);
250
251                 /* Set credit threshold for QM bypass flow */
252                 if (QM_BYPASS_EN)
253                         STORE_RT_REG(p_hwfn,
254                                      QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
255                                      QM_VP_RL_BYPASS_THRESH_SPEED);
256         }
257 }
258
259 /* Prepare VPORT WFQ enable/disable runtime init values */
260 static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
261 {
262         STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
263                      vport_wfq_en ? 1 : 0);
264
265         /* Set credit threshold for QM bypass flow */
266         if (vport_wfq_en && QM_BYPASS_EN)
267                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
268                              QM_WFQ_UPPER_BOUND);
269 }
270
271 /* Prepare runtime init values to allocate PBF command queue lines for
272  * the specified VOQ
273  */
274 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
275                                          u8 ext_voq,
276                                          u16 cmdq_lines)
277 {
278         u32 qm_line_crd;
279
280         qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
281
282         OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
283                          (u32)cmdq_lines);
284         STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
285                          qm_line_crd);
286         STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
287                          qm_line_crd);
288 }
289
290 /* Prepare runtime init values to allocate PBF command queue lines. */
291 static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
292                                      u8 max_ports_per_engine,
293                                      u8 max_phys_tcs_per_port,
294                                      struct init_qm_port_params
295                                      port_params[MAX_NUM_PORTS])
296 {
297         u8 tc, ext_voq, port_id, num_tcs_in_port;
298         u8 num_ext_voqs = MAX_NUM_VOQS_E4;
299
300         /* Clear PBF lines of all VOQs */
301         for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
302                 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
303
304         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
305                 u16 phys_lines, phys_lines_per_tc;
306
307                 if (!port_params[port_id].active)
308                         continue;
309
310                 /* Find number of command queue lines to divide between the
311                  * active physical TCs. In E5, 1/8 of the lines are reserved.
312                  * The lines for the pure LB TC are subtracted.
313                  */
314                 phys_lines = port_params[port_id].num_pbf_cmd_lines;
315                 phys_lines -= PBF_CMDQ_PURE_LB_LINES;
316
317                 /* Find #lines per active physical TC */
318                 num_tcs_in_port = 0;
319                 for (tc = 0; tc < max_phys_tcs_per_port; tc++)
320                         if (((port_params[port_id].active_phys_tcs >> tc) &
321                               0x1) == 1)
322                                 num_tcs_in_port++;
323                 phys_lines_per_tc = phys_lines / num_tcs_in_port;
324
325                 /* Init registers per active TC */
326                 for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
327                         ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
328                                                     max_phys_tcs_per_port);
329                         if (((port_params[port_id].active_phys_tcs >> tc) &
330                             0x1) == 1)
331                                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
332                                                              phys_lines_per_tc);
333                 }
334
335                 /* Init registers for pure LB TC */
336                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
337                                             max_phys_tcs_per_port);
338                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
339                                              PBF_CMDQ_PURE_LB_LINES);
340         }
341 }
342
343 /*
344  * Prepare runtime init values to allocate guaranteed BTB blocks for the
345  * specified port. The guaranteed BTB space is divided between the TCs as
346  * follows (shared space is currently not used):
347  * 1. Parameters:
348  *     B - BTB blocks for this port
349  *     C - Number of physical TCs for this port
350  * 2. Calculation:
351  *     a. 38 blocks (9700B jumbo frame) are allocated for global per port
352  *        headroom.
353  *     b. B = B - 38 (remainder after global headroom allocation).
354  *     c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
355  *     d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
356  *     e. B / C blocks are allocated for each physical TC.
357  * Assumptions:
358  * - MTU is up to 9700 bytes (38 blocks)
359  * - All TCs are considered symmetrical (same rate and packet size)
360  * - No optimization for lossy TC (all are considered lossless). Shared space
361  *   is not enabled and allocated for each TC.
362  */
363 static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
364                                      u8 max_ports_per_engine,
365                                      u8 max_phys_tcs_per_port,
366                                      struct init_qm_port_params
367                                      port_params[MAX_NUM_PORTS])
368 {
369         u32 usable_blocks, pure_lb_blocks, phys_blocks;
370         u8 tc, ext_voq, port_id, num_tcs_in_port;
371
372         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
373                 if (!port_params[port_id].active)
374                         continue;
375
376                 /* Subtract headroom blocks */
377                 usable_blocks = port_params[port_id].num_btb_blocks -
378                                 BTB_HEADROOM_BLOCKS;
379
380                 /* Find blocks per physical TC. Use a factor to avoid
381                  * floating point arithmetic.
382                  */
383                 num_tcs_in_port = 0;
384                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
385                         if (((port_params[port_id].active_phys_tcs >> tc) &
386                               0x1) == 1)
387                                 num_tcs_in_port++;
388
389                 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
390                                   (num_tcs_in_port * BTB_PURE_LB_FACTOR +
391                                    BTB_PURE_LB_RATIO);
392                 pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
393                                             pure_lb_blocks /
394                                             BTB_PURE_LB_FACTOR);
395                 phys_blocks = (usable_blocks - pure_lb_blocks) /
396                               num_tcs_in_port;
397
398                 /* Init physical TCs */
399                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
400                         if (((port_params[port_id].active_phys_tcs >> tc) &
401                              0x1) == 1) {
402                                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
403                                                          max_phys_tcs_per_port);
404                                 STORE_RT_REG(p_hwfn,
405                                         PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
406                                         phys_blocks);
407                         }
408                 }
409
410                 /* Init pure LB TC */
411                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
412                                             max_phys_tcs_per_port);
413                 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
414                              pure_lb_blocks);
415         }
416 }
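/* Illustrative run of the allocation above, for a hypothetical port with
 * num_btb_blocks = 1000 and 4 active physical TCs: usable_blocks =
 * 1000 - 38 = 962, pure_lb_blocks = MAX(38, ((962 * 10) / (4 * 10 + 7)) / 10)
 * = MAX(38, 20) = 38, and phys_blocks = (962 - 38) / 4 = 231 blocks per
 * physical TC.
 */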
417
418 /* Prepare Tx PQ mapping runtime init values for the specified PF */
419 static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
420                                     struct ecore_ptt *p_ptt,
421                                     u8 pf_id,
422                                     u8 max_phys_tcs_per_port,
423                                     bool is_pf_loading,
424                                     u32 num_pf_cids,
425                                     u32 num_vf_cids,
426                                     u16 start_pq,
427                                     u16 num_pf_pqs,
428                                     u16 num_vf_pqs,
429                                     u8 start_vport,
430                                     u32 base_mem_addr_4kb,
431                                     struct init_qm_pq_params *pq_params,
432                                     struct init_qm_vport_params *vport_params)
433 {
434         /* A bit per Tx PQ indicating if the PQ is associated with a VF */
435         u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
436         u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
437         u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
438         u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
439
440         num_pqs = num_pf_pqs + num_vf_pqs;
441
442         first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
443         last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
444
445         pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
446         vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
447         mem_addr_4kb = base_mem_addr_4kb;
448
449         /* Set mapping from PQ group to PF */
450         for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
451                 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
452                              (u32)(pf_id));
453
454         /* Set PQ sizes */
455         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
456                      QM_PQ_SIZE_256B(num_pf_cids));
457         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
458                      QM_PQ_SIZE_256B(num_vf_cids));
459
460         /* Go over all Tx PQs */
461         for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
462                 u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
463                 u8 ext_voq, vport_id_in_pf;
464                 bool is_vf_pq, rl_valid;
465                 u16 first_tx_pq_id;
466
467                 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
468                                             pq_params[i].tc_id,
469                                             max_phys_tcs_per_port);
470                 is_vf_pq = (i >= num_pf_pqs);
471                 rl_valid = pq_params[i].rl_valid > 0;
472
473                 /* Update first Tx PQ of VPORT/TC */
474                 vport_id_in_pf = pq_params[i].vport_id - start_vport;
475                 first_tx_pq_id =
476                 vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
477                 if (first_tx_pq_id == QM_INVALID_PQ_ID) {
478                         u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
479                                        (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
480
481                         /* Create new VP PQ */
482                         vport_params[vport_id_in_pf].
483                             first_tx_pq_id[pq_params[i].tc_id] = pq_id;
484                         first_tx_pq_id = pq_id;
485
486                         /* Map VP PQ to VOQ and PF */
487                         STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
488                                      first_tx_pq_id, map_val);
489                 }
490
491                 /* Check RL ID */
492                 if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
493                         DP_NOTICE(p_hwfn, true,
494                                   "Invalid VPORT ID for rate limiter config\n");
495                         rl_valid = false;
496                 }
497
498                 /* Prepare PQ map entry */
499                 struct qm_rf_pq_map tx_pq_map;
500
501                 QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,
502                                   rl_valid ? 1 : 0,
503                                   rl_valid ? pq_params[i].vport_id : 0,
504                                   ext_voq, pq_params[i].wrr_group);
505
506                 /* Set PQ base address */
507                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
508                              mem_addr_4kb);
509
510                 /* Clear PQ pointer table entry (64 bit) */
511                 if (is_pf_loading)
512                         for (j = 0; j < 2; j++)
513                                 STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
514                                              (pq_id * 2) + j, 0);
515
516                 /* Write PQ info to RAM */
517                 if (WRITE_PQ_INFO_TO_RAM != 0) {
518                         u32 pq_info = 0;
519
520                         pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
521                                                   pq_params[i].tc_id,
522                                                   pq_params[i].port_id,
523                                                   rl_valid ? 1 : 0, rl_valid ?
524                                                   pq_params[i].vport_id : 0);
525                         ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
526                                  pq_info);
527                 }
528
529                 /* If VF PQ, add indication to PQ VF mask */
530                 if (is_vf_pq) {
531                         tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
532                                 (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
533                         mem_addr_4kb += vport_pq_mem_4kb;
534                 } else {
535                         mem_addr_4kb += pq_mem_4kb;
536                 }
537         }
538
539         /* Store Tx PQ VF mask to size select register */
540         for (i = 0; i < num_tx_pq_vf_masks; i++)
541                 if (tx_pq_vf_mask[i])
542                         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
543                                      i, tx_pq_vf_mask[i]);
544 }
545
546 /* Prepare Other PQ mapping runtime init values for the specified PF */
547 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
548                                        u8 pf_id,
549                                        bool is_pf_loading,
550                                        u32 num_pf_cids,
551                                        u32 num_tids,
552                                        u32 base_mem_addr_4kb)
553 {
554         u32 pq_size, pq_mem_4kb, mem_addr_4kb;
555         u16 i, j, pq_id, pq_group;
556
557         /* A single other PQ group is used in each PF, where PQ group i is used
558          * in PF i.
559          */
560         pq_group = pf_id;
561         pq_size = num_pf_cids + num_tids;
562         pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
563         mem_addr_4kb = base_mem_addr_4kb;
564
565         /* Map PQ group to PF */
566         STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
567                      (u32)(pf_id));
568
569         /* Set PQ sizes */
570         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
571                      QM_PQ_SIZE_256B(pq_size));
572
573         for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
574              i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
575                 /* Set PQ base address */
576                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
577                              mem_addr_4kb);
578
579                 /* Clear PQ pointer table entry */
580                 if (is_pf_loading)
581                         for (j = 0; j < 2; j++)
582                                 STORE_RT_REG(p_hwfn,
583                                              QM_REG_PTRTBLOTHER_RT_OFFSET +
584                                              (pq_id * 2) + j, 0);
585
586                 mem_addr_4kb += pq_mem_4kb;
587         }
588 }
589
590 /* Prepare PF WFQ runtime init values for the specified PF.
591  * Return -1 on error.
592  */
593 static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
594                                 u8 pf_id,
595                                 u16 pf_wfq,
596                                 u8 max_phys_tcs_per_port,
597                                 u16 num_tx_pqs,
598                                 struct init_qm_pq_params *pq_params)
599 {
600         u32 inc_val, crd_reg_offset;
601         u8 ext_voq;
602         u16 i;
603
604         inc_val = QM_WFQ_INC_VAL(pf_wfq);
605         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
606                 DP_NOTICE(p_hwfn, true,
607                           "Invalid PF WFQ weight configuration\n");
608                 return -1;
609         }
610
611         for (i = 0; i < num_tx_pqs; i++) {
612                 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
613                                             pq_params[i].tc_id,
614                                             max_phys_tcs_per_port);
615                 crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
616                                   QM_REG_WFQPFCRD_RT_OFFSET :
617                                   QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
618                                  ext_voq * MAX_NUM_PFS_BB +
619                                  (pf_id % MAX_NUM_PFS_BB);
620                 OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
621                                  (u32)QM_WFQ_CRD_REG_SIGN_BIT);
622         }
623
624         STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
625                      pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
626         STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
627
628         return 0;
629 }
630
631 /* Prepare PF RL runtime init values for the specified PF.
632  * Return -1 on error.
633  */
634 static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
635 {
636         u32 inc_val;
637
638         inc_val = QM_RL_INC_VAL(pf_rl);
639         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
640                 DP_NOTICE(p_hwfn, true,
641                           "Invalid PF rate limit configuration\n");
642                 return -1;
643         }
644
645         STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
646                      (u32)QM_RL_CRD_REG_SIGN_BIT);
647         STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
648                      QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
649         STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
650
651         return 0;
652 }
653
654 /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
655  * Return -1 on error.
656  */
657 static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
658                                 u8 num_vports,
659                                 struct init_qm_vport_params *vport_params)
660 {
661         u16 vport_pq_id;
662         u32 inc_val;
663         u8 tc, i;
664
665         /* Go over all PF VPORTs */
666         for (i = 0; i < num_vports; i++) {
667                 if (!vport_params[i].wfq)
668                         continue;
669
670                 inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
671                 if (inc_val > QM_WFQ_MAX_INC_VAL) {
672                         DP_NOTICE(p_hwfn, true,
673                                   "Invalid VPORT WFQ weight configuration\n");
674                         return -1;
675                 }
676
677                 /* Each VPORT can have several VPORT PQ IDs for various TCs */
678                 for (tc = 0; tc < NUM_OF_TCS; tc++) {
679                         vport_pq_id = vport_params[i].first_tx_pq_id[tc];
680                         if (vport_pq_id != QM_INVALID_PQ_ID) {
681                                 STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
682                                              vport_pq_id,
683                                              (u32)QM_WFQ_CRD_REG_SIGN_BIT);
684                                 STORE_RT_REG(p_hwfn,
685                                              QM_REG_WFQVPWEIGHT_RT_OFFSET +
686                                              vport_pq_id, inc_val);
687                         }
688                 }
689         }
690         return 0;
691 }
692
693 /* Prepare VPORT RL runtime init values for the specified VPORTs.
694  * Return -1 on error.
695  */
696 static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
697                                   u8 start_vport,
698                                   u8 num_vports,
699                                   u32 link_speed,
700                                   struct init_qm_vport_params *vport_params)
701 {
702         u8 i, vport_id;
703         u32 inc_val;
704
705         if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
706                 DP_NOTICE(p_hwfn, true,
707                           "Invalid VPORT ID for rate limiter configuration\n");
708                 return -1;
709         }
710
711         /* Go over all PF VPORTs */
712         for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
713                 inc_val = QM_RL_INC_VAL(link_speed);
714                 if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
715                         DP_NOTICE(p_hwfn, true,
716                                   "Invalid VPORT rate-limit configuration\n");
717                         return -1;
718                 }
719
720                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
721                              (u32)QM_RL_CRD_REG_SIGN_BIT);
722                 STORE_RT_REG(p_hwfn,
723                              QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
724                              QM_VP_RL_UPPER_BOUND(link_speed) |
725                              (u32)QM_RL_CRD_REG_SIGN_BIT);
726                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
727                              inc_val);
728         }
729
730         return 0;
731 }
732
733 static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
734                                        struct ecore_ptt *p_ptt)
735 {
736         u32 reg_val, i;
737
738         for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
739              i++) {
740                 OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
741                 reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
742         }
743
744         /* Check for timeout while waiting for SDM command ready */
745         if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
746                 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
747                            "Timeout waiting for QM SDM cmd ready signal\n");
748                 return false;
749         }
750
751         return true;
752 }
753
754 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
755                               struct ecore_ptt *p_ptt,
756                               u32 cmd_addr,
757                               u32 cmd_data_lsb,
758                               u32 cmd_data_msb)
759 {
760         if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
761                 return false;
762
763         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
764         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
765         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
766         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
767         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
768
769         return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
770 }
771
772
773 /******************** INTERFACE IMPLEMENTATION *********************/
774
775 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
776                          u32 num_vf_cids,
777                          u32 num_tids,
778                          u16 num_pf_pqs,
779                          u16 num_vf_pqs)
780 {
781         return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
782             QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
783             QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
784 }
785
786 int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
787                             u8 max_ports_per_engine,
788                             u8 max_phys_tcs_per_port,
789                             bool pf_rl_en,
790                             bool pf_wfq_en,
791                             bool vport_rl_en,
792                             bool vport_wfq_en,
793                             struct init_qm_port_params
794                             port_params[MAX_NUM_PORTS])
795 {
796         u32 mask;
797
798         /* Init AFullOprtnstcCrdMask */
799         mask = (QM_OPPOR_LINE_VOQ_DEF <<
800                 QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
801                 (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
802                 (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
803                 (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
804                 (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
805                 (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
806                 (QM_OPPOR_FW_STOP_DEF <<
807                  QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
808                 (QM_OPPOR_PQ_EMPTY_DEF <<
809                  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
810         STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
811
812         /* Enable/disable PF RL */
813         ecore_enable_pf_rl(p_hwfn, pf_rl_en);
814
815         /* Enable/disable PF WFQ */
816         ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
817
818         /* Enable/disable VPORT RL */
819         ecore_enable_vport_rl(p_hwfn, vport_rl_en);
820
821         /* Enable/disable VPORT WFQ */
822         ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
823
824         /* Init PBF CMDQ line credit */
825         ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
826                                  max_phys_tcs_per_port, port_params);
827
828         /* Init BTB blocks in PBF */
829         ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
830                                  max_phys_tcs_per_port, port_params);
831
832         return 0;
833 }
834
835 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
836                         struct ecore_ptt *p_ptt,
837                         u8 pf_id,
838                         u8 max_phys_tcs_per_port,
839                         bool is_pf_loading,
840                         u32 num_pf_cids,
841                         u32 num_vf_cids,
842                         u32 num_tids,
843                         u16 start_pq,
844                         u16 num_pf_pqs,
845                         u16 num_vf_pqs,
846                         u8 start_vport,
847                         u8 num_vports,
848                         u16 pf_wfq,
849                         u32 pf_rl,
850                         u32 link_speed,
851                         struct init_qm_pq_params *pq_params,
852                         struct init_qm_vport_params *vport_params)
853 {
854         u32 other_mem_size_4kb;
855         u8 tc, i;
856
857         other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
858                              QM_OTHER_PQS_PER_PF;
859
860         /* Clear first Tx PQ ID array for each VPORT */
861         for (i = 0; i < num_vports; i++)
862                 for (tc = 0; tc < NUM_OF_TCS; tc++)
863                         vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
864
865         /* Map Other PQs (if any) */
866 #if QM_OTHER_PQS_PER_PF > 0
867         ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
868                                    num_tids, 0);
869 #endif
870
871         /* Map Tx PQs */
872         ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
873                                 is_pf_loading, num_pf_cids, num_vf_cids,
874                                 start_pq, num_pf_pqs, num_vf_pqs, start_vport,
875                                 other_mem_size_4kb, pq_params, vport_params);
876
877         /* Init PF WFQ */
878         if (pf_wfq)
879                 if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
880                                          max_phys_tcs_per_port,
881                                          num_pf_pqs + num_vf_pqs, pq_params))
882                         return -1;
883
884         /* Init PF RL */
885         if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
886                 return -1;
887
888         /* Set VPORT WFQ */
889         if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
890                 return -1;
891
892         /* Set VPORT RL */
893         if (ecore_vport_rl_rt_init
894             (p_hwfn, start_vport, num_vports, link_speed, vport_params))
895                 return -1;
896
897         return 0;
898 }
899
900 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
901                       struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
902 {
903         u32 inc_val;
904
905         inc_val = QM_WFQ_INC_VAL(pf_wfq);
906         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
907                 DP_NOTICE(p_hwfn, true,
908                           "Invalid PF WFQ weight configuration\n");
909                 return -1;
910         }
911
912         ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
913
914         return 0;
915 }
916
917 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
918                      struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
919 {
920         u32 inc_val;
921
922         inc_val = QM_RL_INC_VAL(pf_rl);
923         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
924                 DP_NOTICE(p_hwfn, true,
925                           "Invalid PF rate limit configuration\n");
926                 return -1;
927         }
928
929         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
930                  (u32)QM_RL_CRD_REG_SIGN_BIT);
931         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
932
933         return 0;
934 }
935
936 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
937                          struct ecore_ptt *p_ptt,
938                          u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
939 {
940         u16 vport_pq_id;
941         u32 inc_val;
942         u8 tc;
943
944         inc_val = QM_WFQ_INC_VAL(vport_wfq);
945         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
946                 DP_NOTICE(p_hwfn, true,
947                           "Invalid VPORT WFQ weight configuration\n");
948                 return -1;
949         }
950
951         for (tc = 0; tc < NUM_OF_TCS; tc++) {
952                 vport_pq_id = first_tx_pq_id[tc];
953                 if (vport_pq_id != QM_INVALID_PQ_ID) {
954                         ecore_wr(p_hwfn, p_ptt,
955                                  QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
956                 }
957         }
958
959         return 0;
960 }
961
962 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
963                         struct ecore_ptt *p_ptt, u8 vport_id,
964                         u32 vport_rl,
965                         u32 link_speed)
966 {
967         u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
968
969         if (vport_id >= max_qm_global_rls) {
970                 DP_NOTICE(p_hwfn, true,
971                           "Invalid VPORT ID for rate limiter configuration\n");
972                 return -1;
973         }
974
975         inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
976         if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
977                 DP_NOTICE(p_hwfn, true,
978                           "Invalid VPORT rate-limit configuration\n");
979                 return -1;
980         }
981
982         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
983                  (u32)QM_RL_CRD_REG_SIGN_BIT);
984         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
985
986         return 0;
987 }
988
989 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
990                             struct ecore_ptt *p_ptt,
991                             bool is_release_cmd,
992                             bool is_tx_pq, u16 start_pq, u16 num_pqs)
993 {
994         u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
995         u32 pq_mask = 0, last_pq, pq_id;
996
997         last_pq = start_pq + num_pqs - 1;
998
999         /* Set command's PQ type */
1000         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
1001
1002         /* Go over requested PQs */
1003         for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
1004                 /* Set PQ bit in mask (stop command only) */
1005                 if (!is_release_cmd)
1006                         pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
1007
1008                 /* If last PQ or end of PQ mask, write command */
1009                 if ((pq_id == last_pq) ||
1010                     (pq_id % QM_STOP_PQ_MASK_WIDTH ==
1011                     (QM_STOP_PQ_MASK_WIDTH - 1))) {
1012                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
1013                                          pq_mask);
1014                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
1015                                          pq_id / QM_STOP_PQ_MASK_WIDTH);
1016                         if (!ecore_send_qm_cmd
1017                             (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
1018                              cmd_arr[1]))
1019                                 return false;
1020                         pq_mask = 0;
1021                 }
1022         }
1023
1024         return true;
1025 }
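/* Example of the grouping performed above: a stop command for start_pq = 30
 * and num_pqs = 4 issues two QM commands - group 0 with mask bits 30 and 31
 * set (PQs 30 and 31), then group 1 with mask bits 0 and 1 set (PQs 32 and
 * 33). A release command sends the same groups with an all-zero mask.
 */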
1026
1027
1028 /* NIG: ETS configuration constants */
1029 #define NIG_TX_ETS_CLIENT_OFFSET        4
1030 #define NIG_LB_ETS_CLIENT_OFFSET        1
1031 #define NIG_ETS_MIN_WFQ_BYTES           1600
1032
1033 /* NIG: ETS constants */
1034 #define NIG_ETS_UP_BOUND(weight, mtu) \
1035         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1036
1037 /* NIG: RL constants */
1038
1039 /* Byte base type value */
1040 #define NIG_RL_BASE_TYPE                1
1041
1042 /* Period in us */
1043 #define NIG_RL_PERIOD                   1
1044
1045 /* Period in 25MHz cycles */
1046 #define NIG_RL_PERIOD_CLK_25M           (25 * NIG_RL_PERIOD)
1047
1048 /* Rate in mbps */
1049 #define NIG_RL_INC_VAL(rate)            (((rate) * NIG_RL_PERIOD) / 8)
1050
1051 #define NIG_RL_MAX_VAL(inc_val, mtu) \
1052         (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
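/* Illustrative calculation: a 25000 Mbps rate gives NIG_RL_INC_VAL(25000) =
 * (25000 * 1) / 8 = 3125, and with an assumed 9600-byte MTU
 * NIG_RL_MAX_VAL(3125, 9600) = 2 * 9600 = 19200.
 */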
1053
1054 /* NIG: packet priority configuration constants */
1055 #define NIG_PRIORITY_MAP_TC_BITS        4
1056
1057
1058 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
1059                         struct ecore_ptt *p_ptt,
1060                         struct init_ets_req *req, bool is_lb)
1061 {
1062         u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
1063         u32 tc_bound_base_addr, tc_bound_addr_diff;
1064         u8 sp_tc_map = 0, wfq_tc_map = 0;
1065         u8 tc, num_tc, tc_client_offset;
1066
1067         num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
1068         tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
1069                                    NIG_TX_ETS_CLIENT_OFFSET;
1070         min_weight = 0xffffffff;
1071         tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1072                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1073         tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
1074                                       NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1075                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
1076                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1077         tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1078                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1079         tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
1080                                      NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1081                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
1082                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1083
1084         for (tc = 0; tc < num_tc; tc++) {
1085                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1086
1087                 /* Update SP map */
1088                 if (tc_req->use_sp)
1089                         sp_tc_map |= (1 << tc);
1090
1091                 if (!tc_req->use_wfq)
1092                         continue;
1093
1094                 /* Update WFQ map */
1095                 wfq_tc_map |= (1 << tc);
1096
1097                 /* Find minimal weight */
1098                 if (tc_req->weight < min_weight)
1099                         min_weight = tc_req->weight;
1100         }
1101
1102         /* Write SP map */
1103         ecore_wr(p_hwfn, p_ptt,
1104                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
1105                  NIG_REG_TX_ARB_CLIENT_IS_STRICT,
1106                  (sp_tc_map << tc_client_offset));
1107
1108         /* Write WFQ map */
1109         ecore_wr(p_hwfn, p_ptt,
1110                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
1111                  NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
1112                  (wfq_tc_map << tc_client_offset));
1113         /* write WFQ weights */
1114         for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
1115                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1116                 u32 byte_weight;
1117
1118                 if (!tc_req->use_wfq)
1119                         continue;
1120
1121                 /* Translate weight to bytes */
1122                 byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1123                               min_weight;
1124
1125                 /* Write WFQ weight */
1126                 ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
1127                          tc_weight_addr_diff * tc_client_offset, byte_weight);
1128
1129                 /* Write WFQ upper bound */
1130                 ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
1131                          tc_bound_addr_diff * tc_client_offset,
1132                          NIG_ETS_UP_BOUND(byte_weight, req->mtu));
1133         }
1134 }
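/* Example of the weight translation above: with two hypothetical WFQ TCs of
 * weights 1 and 2, min_weight is 1, so the programmed byte weights are
 * (1600 * 1) / 1 = 1600 and (1600 * 2) / 1 = 3200, each with an upper bound
 * of NIG_ETS_UP_BOUND(byte_weight, mtu) = 2 * MAX(byte_weight, mtu).
 */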
1135
1136 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
1137                           struct ecore_ptt *p_ptt,
1138                           struct init_nig_lb_rl_req *req)
1139 {
1140         u32 ctrl, inc_val, reg_offset;
1141         u8 tc;
1142
1143         /* Disable global MAC+LB RL */
1144         ctrl =
1145             NIG_RL_BASE_TYPE <<
1146             NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
1147         ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1148
1149         /* Configure and enable global MAC+LB RL */
1150         if (req->lb_mac_rate) {
1151                 /* Configure  */
1152                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
1153                          NIG_RL_PERIOD_CLK_25M);
1154                 inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
1155                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
1156                          inc_val);
1157                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
1158                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1159
1160                 /* Enable */
1161                 ctrl |=
1162                     1 <<
1163                     NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
1164                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1165         }
1166
1167         /* Disable global LB-only RL */
1168         ctrl =
1169             NIG_RL_BASE_TYPE <<
1170             NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
1171         ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1172
1173         /* Configure and enable global LB-only RL */
1174         if (req->lb_rate) {
1175                 /* Configure  */
1176                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
1177                          NIG_RL_PERIOD_CLK_25M);
1178                 inc_val = NIG_RL_INC_VAL(req->lb_rate);
1179                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
1180                          inc_val);
1181                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
1182                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1183
1184                 /* Enable */
1185                 ctrl |=
1186                     1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
1187                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1188         }
1189
1190         /* Per-TC RLs */
1191         for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
1192              tc++, reg_offset += 4) {
1193                 /* Disable TC RL */
1194                 ctrl =
1195                     NIG_RL_BASE_TYPE <<
1196                 NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
1197                 ecore_wr(p_hwfn, p_ptt,
1198                          NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1199
1200                 /* Configure and enable TC RL */
1201                 if (!req->tc_rate[tc])
1202                         continue;
1203
1204                 /* Configure */
1205                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
1206                          reg_offset, NIG_RL_PERIOD_CLK_25M);
1207                 inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
1208                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
1209                          reg_offset, inc_val);
1210                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
1211                          reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
1212
1213                 /* Enable */
1214                 ctrl |= 1 <<
1215                         NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
1216                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
1217                          reg_offset, ctrl);
1218         }
1219 }
1220
1221 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
1222                                struct ecore_ptt *p_ptt,
1223                                struct init_nig_pri_tc_map_req *req)
1224 {
1225         u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
1226         u32 pri_tc_mask = 0;
1227         u8 pri, tc;
1228
1229         for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
1230                 if (!req->pri[pri].valid)
1231                         continue;
1232
1233                 pri_tc_mask |= (req->pri[pri].tc_id <<
1234                                 (pri * NIG_PRIORITY_MAP_TC_BITS));
1235                 tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
1236         }
1237
1238         /* Write priority -> TC mask */
1239         ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
1240
1241         /* Write TC -> priority mask */
1242         for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
1243                 ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
1244                          tc_pri_mask[tc]);
1245                 ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
1246                          tc_pri_mask[tc]);
1247         }
1248 }
1249
1250
1251 /* PRS: ETS configuration constants */
1252 #define PRS_ETS_MIN_WFQ_BYTES           1600
1253 #define PRS_ETS_UP_BOUND(weight, mtu) \
1254         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1255
1256
1257 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
1258                         struct ecore_ptt *p_ptt, struct init_ets_req *req)
1259 {
1260         u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
1261         u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
1262
1263         tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
1264                               PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
1265         tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
1266                              PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
1267
1268         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1269                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1270
1271                 /* Update SP map */
1272                 if (tc_req->use_sp)
1273                         sp_tc_map |= (1 << tc);
1274
1275                 if (!tc_req->use_wfq)
1276                         continue;
1277
1278                 /* Update WFQ map */
1279                 wfq_tc_map |= (1 << tc);
1280
1281                 /* Find minimal weight */
1282                 if (tc_req->weight < min_weight)
1283                         min_weight = tc_req->weight;
1284         }
1285
1286         /* write SP map */
1287         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
1288
1289         /* write WFQ map */
1290         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
1291                  wfq_tc_map);
1292
1293         /* write WFQ weights */
1294         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1295                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1296                 u32 byte_weight;
1297
1298                 if (!tc_req->use_wfq)
1299                         continue;
1300
1301                 /* Translate weight to bytes */
1302                 byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1303                               min_weight;
1304
1305                 /* Write WFQ weight */
1306                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
1307                          tc_weight_addr_diff, byte_weight);
1308
1309                 /* Write WFQ upper bound */
1310                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
1311                          tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
1312                                                                    req->mtu));
1313         }
1314 }
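
/* Illustrative note (hypothetical numbers): with req->mtu = 1500 and two WFQ
 * TCs of weights 1 and 2, min_weight is 1, so the programmed byte weights are
 * 1600 and 3200 (PRS_ETS_MIN_WFQ_BYTES * weight / min_weight) and the upper
 * bounds are 2 * max(byte_weight, mtu) = 3200 and 6400 respectively.
 */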
1315
1316
1317 /* BRB: RAM configuration constants */
1318 #define BRB_TOTAL_RAM_BLOCKS_BB 4800
1319 #define BRB_TOTAL_RAM_BLOCKS_K2 5632
1320 #define BRB_BLOCK_SIZE          128
1321 #define BRB_MIN_BLOCKS_PER_TC   9
1322 #define BRB_HYST_BYTES          10240
1323 #define BRB_HYST_BLOCKS         (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
1324
1325 /* Temporary big RAM allocation - should be updated */
1326 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1327                         struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
1328 {
1329         u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1330         u32 active_port_blocks, reg_offset = 0;
1331         u8 port, active_ports = 0;
1332
1333         tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
1334                                                BRB_BLOCK_SIZE);
1335         min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
1336                                                 BRB_BLOCK_SIZE);
1337         total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
1338                                                     BRB_TOTAL_RAM_BLOCKS_BB;
1339
1340         /* Find number of active ports */
1341         for (port = 0; port < MAX_NUM_PORTS; port++)
1342                 if (req->num_active_tcs[port])
1343                         active_ports++;
1344
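        /* Note: this assumes at least one port has active TCs; otherwise the
         * division below would be undefined.
         */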
1345         active_port_blocks = (u32)(total_blocks / active_ports);
1346
1347         for (port = 0; port < req->max_ports_per_engine; port++) {
1348                 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1349                 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1350                 u32 tc_guaranteed_blocks;
1351                 u8 tc;
1352
1353                 /* Calculate per-port sizes */
1354                 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
1355                                                          BRB_BLOCK_SIZE);
1356                 port_blocks = req->num_active_tcs[port] ? active_port_blocks :
1357                                                           0;
1358                 port_guaranteed_blocks = req->num_active_tcs[port] *
1359                                          tc_guaranteed_blocks;
1360                 port_shared_blocks = port_blocks - port_guaranteed_blocks;
1361                 full_xoff_th = req->num_active_tcs[port] *
1362                                BRB_MIN_BLOCKS_PER_TC;
1363                 full_xon_th = full_xoff_th + min_pkt_size_blocks;
1364                 pause_xoff_th = tc_headroom_blocks;
1365                 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
1366
1367                 /* Init total size per port */
1368                 ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
1369                          port_blocks);
1370
1371                 /* Init shared size per port */
1372                 ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
1373                          port_shared_blocks);
1374
1375                 for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1376                         /* Clear init values for non-active TCs */
1377                         if (tc == req->num_active_tcs[port]) {
1378                                 tc_guaranteed_blocks = 0;
1379                                 full_xoff_th = 0;
1380                                 full_xon_th = 0;
1381                                 pause_xoff_th = 0;
1382                                 pause_xon_th = 0;
1383                         }
1384
1385                         /* Init guaranteed size per TC */
1386                         ecore_wr(p_hwfn, p_ptt,
1387                                  BRB_REG_TC_GUARANTIED_0 + reg_offset,
1388                                  tc_guaranteed_blocks);
1389                         ecore_wr(p_hwfn, p_ptt,
1390                                  BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
1391                                  BRB_HYST_BLOCKS);
1392
1393                         /* Init pause/full thresholds per physical TC - for
1394                          * loopback traffic.
1395                          */
1396                         ecore_wr(p_hwfn, p_ptt,
1397                                  BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
1398                                  reg_offset, full_xoff_th);
1399                         ecore_wr(p_hwfn, p_ptt,
1400                                  BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
1401                                  reg_offset, full_xon_th);
1402                         ecore_wr(p_hwfn, p_ptt,
1403                                  BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
1404                                  reg_offset, pause_xoff_th);
1405                         ecore_wr(p_hwfn, p_ptt,
1406                                  BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
1407                                  reg_offset, pause_xon_th);
1408
1409                         /* Init pause/full thresholds per physical TC - for
1410                          * main traffic.
1411                          */
1412                         ecore_wr(p_hwfn, p_ptt,
1413                                  BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
1414                                  reg_offset, full_xoff_th);
1415                         ecore_wr(p_hwfn, p_ptt,
1416                                  BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
1417                                  reg_offset, full_xon_th);
1418                         ecore_wr(p_hwfn, p_ptt,
1419                                  BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
1420                                  reg_offset, pause_xoff_th);
1421                         ecore_wr(p_hwfn, p_ptt,
1422                                  BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
1423                                  reg_offset, pause_xon_th);
1424                 }
1425         }
1426 }
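
/* Illustrative note (hypothetical numbers): on K2 with two active ports the
 * per-port budget is 5632 / 2 = 2816 blocks. With guranteed_per_tc = 2048
 * bytes, tc_guaranteed_blocks = DIV_ROUND_UP(2048, 128) = 16, so a port with
 * four active TCs gets 4 * 16 = 64 guaranteed blocks and 2816 - 64 = 2752
 * shared blocks, which is what the BRB_REG_TOTAL_MAC_SIZE and
 * BRB_REG_SHARED_HR_AREA writes above program.
 */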
1427
1428 /* In MF mode, should be called once per port to set the EtherType of the OuterTag */
1429 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1430 {
1431         /* Update DORQ register */
1432         STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
1433 }
1434
1435 #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
1436 (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
1437 #define PRS_ETH_TUNN_OUTPUT_FORMAT        -188897008
1438 #define PRS_ETH_OUTPUT_FORMAT             -46832
1439
1440 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
1441                                struct ecore_ptt *p_ptt, u16 dest_port)
1442 {
1443         /* Update PRS register */
1444         ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1445
1446         /* Update NIG register */
1447         ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1448
1449         /* Update PBF register */
1450         ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1451 }
1452
1453 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
1454                             struct ecore_ptt *p_ptt, bool vxlan_enable)
1455 {
1456         u32 reg_val;
1457
1458         /* Update PRS register */
1459         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1460         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1461                            PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
1462                            vxlan_enable);
1463         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1464         if (reg_val) { /* TODO: handle E5 init */
1465                 reg_val = ecore_rd(p_hwfn, p_ptt,
1466                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1467
1468                 /* Update output only if tunnel blocks are not included. */
1469                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1470                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1471                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1472         }
1473
1474         /* Update NIG register */
1475         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1476         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1477                                    NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
1478                                    vxlan_enable);
1479         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1480
1481         /* Update DORQ register */
1482         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
1483                  vxlan_enable ? 1 : 0);
1484 }
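
/* Illustrative usage (values are examples only): program the standard VXLAN
 * UDP port and enable VXLAN classification; the GENEVE helpers below follow
 * the same dest-port + enable pattern.
 *
 *      ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
 *      ecore_set_vxlan_enable(p_hwfn, p_ptt, true);
 */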
1485
1486 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
1487                           struct ecore_ptt *p_ptt,
1488                           bool eth_gre_enable, bool ip_gre_enable)
1489 {
1490         u32 reg_val;
1491
1492         /* Update PRS register */
1493         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1494         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1495                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
1496                    eth_gre_enable);
1497         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1498                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
1499                    ip_gre_enable);
1500         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1501         if (reg_val) { /* TODO: handle E5 init */
1502                 reg_val = ecore_rd(p_hwfn, p_ptt,
1503                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1504
1505                 /* Update output only if tunnel blocks are not included. */
1506                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1507                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1508                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1509         }
1510
1511         /* Update NIG register */
1512         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1513         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1514                    NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
1515                    eth_gre_enable);
1516         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1517                    NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
1518                    ip_gre_enable);
1519         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1520
1521         /* Update DORQ registers */
1522         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
1523                  eth_gre_enable ? 1 : 0);
1524         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
1525                  ip_gre_enable ? 1 : 0);
1526 }
1527
1528 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
1529                                 struct ecore_ptt *p_ptt, u16 dest_port)
1530 {
1531         /* Update PRS register */
1532         ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
1533
1534         /* Update NIG register */
1535         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
1536
1537         /* Update PBF register */
1538         ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
1539 }
1540
1541 void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
1542                              struct ecore_ptt *p_ptt,
1543                              bool eth_geneve_enable, bool ip_geneve_enable)
1544 {
1545         u32 reg_val;
1546
1547         /* Update PRS register */
1548         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1549         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1550                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
1551                    eth_geneve_enable);
1552         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1553                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
1554                    ip_geneve_enable);
1555         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1556         if (reg_val) { /* TODO: handle E5 init */
1557                 reg_val = ecore_rd(p_hwfn, p_ptt,
1558                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1559
1560                 /* Update output only if tunnel blocks are not included. */
1561                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1562                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1563                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1564         }
1565
1566         /* Update NIG register */
1567         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
1568                  eth_geneve_enable ? 1 : 0);
1569         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
1570                  ip_geneve_enable ? 1 : 0);
1571
1572         /* EDPM with geneve tunnel not supported in BB */
1573         if (ECORE_IS_BB_B0(p_hwfn->p_dev))
1574                 return;
1575
1576         /* Update DORQ registers */
1577         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
1578                  eth_geneve_enable ? 1 : 0);
1579         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
1580                  ip_geneve_enable ? 1 : 0);
1581 }
1582
1583 #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET   4
1584 #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT      -927094512
1585
1586 void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
1587                                   struct ecore_ptt *p_ptt,
1588                                   bool enable)
1589 {
1590         u32 reg_val, cfg_mask;
1591
1592         /* read PRS config register */
1593         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
1594
1595         /* set VXLAN_NO_L2_ENABLE mask */
1596         cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
1597
1598         if (enable) {
1599                 /* set VXLAN_NO_L2_ENABLE flag */
1600                 reg_val |= cfg_mask;
1601
1602                 /* Update PRS FIC register */
1603                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1604                  (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
1605         } else {
1606                 /* clear VXLAN_NO_L2_ENABLE flag */
1607                 reg_val &= ~cfg_mask;
1608         }
1609
1610         /* write PRS config register */
1611         ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
1612 }
1613
1614 #define T_ETH_PACKET_ACTION_GFT_EVENTID  23
1615 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
1616 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1617 #define PARSER_ETH_CONN_CM_HDR 0
1618 #define CAM_LINE_SIZE sizeof(u32)
1619 #define RAM_LINE_SIZE sizeof(u64)
1620 #define REG_SIZE sizeof(u32)
1621
1622 void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
1623                        struct ecore_ptt *p_ptt,
1624                        u16 pf_id)
1625 {
1626         /* disable gft search for PF */
1627         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1628
1629         /* Clean RAM & CAM for next GFT session */
1630
1631         /* Zero camline */
1632         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
1633
1634         /* Zero ramline */
1635         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1636                                 RAM_LINE_SIZE * pf_id, 0);
1637         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1638                                 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
1639 }
1640
1641
1642 void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
1643                                    struct ecore_ptt *p_ptt)
1644 {
1645         u32 rfs_cm_hdr_event_id;
1646
1647         /* Set RFS event ID to be awakened in Tstorm by PRS */
1648         rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1649         rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
1650             PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1651         rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
1652             PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1653         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1654 }
1655
1656 void ecore_gft_config(struct ecore_hwfn *p_hwfn,
1657                                struct ecore_ptt *p_ptt,
1658                                u16 pf_id,
1659                                bool tcp,
1660                                bool udp,
1661                                bool ipv4,
1662                                bool ipv6,
1663                                enum gft_profile_type profile_type)
1664 {
1665         u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
1666
1667         if (!ipv6 && !ipv4)
1668                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
1669         if (!tcp && !udp)
1670                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
1671         if (profile_type >= MAX_GFT_PROFILE_TYPE)
1672                 DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
1673
1674         /* Set RFS event ID to be awakened in Tstorm by PRS */
1675         reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
1676                   PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1677         reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1678         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
1679
1680         /* Do not load context, only the cid, in PRS on match. */
1681         ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1682
1683         /* Do not use tenant ID exist bit for GFT search */
1684         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
1685
1686         /* Set Cam */
1687         cam_line = 0;
1688         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
1689
1690         /* Filters are per PF!! */
1691         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
1692                   GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
1693         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
1694
1695         if (!(tcp && udp)) {
1696                 SET_FIELD(cam_line,
1697                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
1698                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
1699                 if (tcp)
1700                         SET_FIELD(cam_line,
1701                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1702                                   GFT_PROFILE_TCP_PROTOCOL);
1703                 else
1704                         SET_FIELD(cam_line,
1705                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1706                                   GFT_PROFILE_UDP_PROTOCOL);
1707         }
1708
1709         if (!(ipv4 && ipv6)) {
1710                 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
1711                 if (ipv4)
1712                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1713                                   GFT_PROFILE_IPV4);
1714                 else
1715                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1716                                   GFT_PROFILE_IPV6);
1717         }
1718
1719         /* Write characteristics to cam */
1720         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
1721                  cam_line);
1722         cam_line = ecore_rd(p_hwfn, p_ptt,
1723                             PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
1724
1725         /* Write line to RAM - compare to filter 4 tuple */
1726         ram_line_lo = 0;
1727         ram_line_hi = 0;
1728
1729         /* Search no IP as GFT */
1730         search_non_ip_as_gft = 0;
1731
1732         /* Tunnel type */
1733         SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
1734         SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
1735
1736         if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
1737                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1738                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1739                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1740                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1741                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
1742                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1743         } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
1744                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1745                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1746                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1747         } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
1748                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1749                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1750         } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
1751                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1752                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1753         } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
1754                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
1755
1756                 /* Allow tunneled traffic without inner IP */
1757                 search_non_ip_as_gft = 1;
1758         }
1759
1760         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
1761                  search_non_ip_as_gft);
1762         ecore_wr(p_hwfn, p_ptt,
1763                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1764                  ram_line_lo);
1765         ecore_wr(p_hwfn, p_ptt,
1766                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
1767                  REG_SIZE, ram_line_hi);
1768
1769         /* Set default profile so that no filter match will happen */
1770         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1771                  PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
1772         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1773                  PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
1774
1775         /* Enable gft search */
1776         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
1777 }
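
/* Illustrative usage (values are examples only): configure PF 0 for RFS-style
 * 4-tuple matching of TCP over IPv4:
 *
 *      ecore_gft_config(p_hwfn, p_ptt, 0, true, false, true, false,
 *                       GFT_PROFILE_TYPE_4_TUPLE);
 */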
1778
1779 /* Configure VF zone size mode */
1780 void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
1781                                     struct ecore_ptt *p_ptt, u16 mode,
1782                                     bool runtime_init)
1783 {
1784         u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
1785         u32 msdm_vf_offset_mask;
1786
1787         if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
1788                 msdm_vf_size_log += 1;
1789         else if (mode == VF_ZONE_SIZE_MODE_QUAD)
1790                 msdm_vf_size_log += 2;
1791
1792         msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
1793
1794         if (runtime_init) {
1795                 STORE_RT_REG(p_hwfn,
1796                              PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
1797                              msdm_vf_size_log);
1798                 STORE_RT_REG(p_hwfn,
1799                              PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
1800                              msdm_vf_offset_mask);
1801         } else {
1802                 ecore_wr(p_hwfn, p_ptt,
1803                          PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
1804                 ecore_wr(p_hwfn, p_ptt,
1805                          PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
1806         }
1807 }
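
/* Illustrative note: if MSTORM_VF_ZONE_DEFAULT_SIZE_LOG were 12 (a
 * hypothetical value), VF_ZONE_SIZE_MODE_QUAD would program a shift of 14 and
 * an offset mask of (1 << 14) - 1 = 0x3fff.
 */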
1808
1809 /* Get mstorm statistics for offset by VF zone size mode */
1810 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
1811                                        u16 stat_cnt_id,
1812                                        u16 vf_zone_size_mode)
1813 {
1814         u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1815
1816         if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
1817             (stat_cnt_id > MAX_NUM_PFS)) {
1818                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1819                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1820                             (stat_cnt_id - MAX_NUM_PFS);
1821                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1822                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1823                             (stat_cnt_id - MAX_NUM_PFS);
1824         }
1825
1826         return offset;
1827 }
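
/* Illustrative note: (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) is the size of a
 * default VF zone, so for VFs beyond MAX_NUM_PFS the DOUBLE mode adds one
 * extra default-sized zone per VF and the QUAD mode adds three, which is
 * where the factor of 3 above comes from.
 */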
1828
1829 /* Get mstorm VF producer offset by VF zone size mode */
1830 u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
1831                                          u8 vf_id,
1832                                          u8 vf_queue_id,
1833                                          u16 vf_zone_size_mode)
1834 {
1835         u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
1836
1837         if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
1838                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1839                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1840                                    vf_id;
1841                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1842                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1843                                   vf_id;
1844         }
1845
1846         return offset;
1847 }
1848
1849 #ifndef LINUX_REMOVE
1850 #define CRC8_INIT_VALUE 0xFF
1851 #endif
1852 static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
1853
1854 /* Calculate and return CDU validation byte per connection type / region /
1855  * cid
1856  */
1857 static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
1858 {
1859         const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
1860
1861         static u8 crc8_table_valid;     /* automatically initialized to 0 */
1862         u8 crc, validation_byte = 0;
1863         u32 validation_string = 0;
1864         u32 data_to_crc;
1865
1866         if (crc8_table_valid == 0) {
1867                 OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
1868                 crc8_table_valid = 1;
1869         }
1870
1871         /*
1872          * The CRC is calculated on the String-to-compress:
1873          * [31:8]  = {CID[31:20],CID[11:0]}
1874          * [7:4]   = Region
1875          * [3:0]   = Type
1876          */
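        /* Illustrative packing (hypothetical values, assuming the USE_CID,
         * USE_REGION and USE_TYPE cfg bits are all set): cid 0x12345678,
         * region 3, type 1 gives
         * validation_string = 0x12300000 | (0x678 << 8) | (3 << 4) | 1
         *                   = 0x12367831
         */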
1877         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
1878                 validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
1879
1880         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
1881                 validation_string |= ((region & 0xF) << 4);
1882
1883         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
1884                 validation_string |= (conn_type & 0xF);
1885
1886         /* Convert to big-endian and calculate CRC8 */
1887         data_to_crc = OSAL_BE32_TO_CPU(validation_string);
1888
1889         crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
1890                         CRC8_INIT_VALUE);
1891
1892         /* The validation byte [7:0] is composed:
1893          * for type A validation
1894          * [7]          = active configuration bit
1895          * [6:0]        = crc[6:0]
1896          *
1897          * for type B validation
1898          * [7]          = active configuration bit
1899          * [6:3]        = connection_type[3:0]
1900          * [2:0]        = crc[2:0]
1901          */
1902
1903         validation_byte |= ((validation_cfg >>
1904                              CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
1905
1906         if ((validation_cfg >>
1907              CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
1908                 validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
1909         else
1910                 validation_byte |= crc & 0x7F;
1911
1912         return validation_byte;
1913 }
1914
1915 /* Calculate and set validation bytes for session context */
1916 void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
1917                                        u8 ctx_type, u32 cid)
1918 {
1919         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1920
1921         p_ctx = (u8 *)p_ctx_mem;
1922         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1923         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1924         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1925
1926         OSAL_MEMSET(p_ctx, 0, ctx_size);
1927
1928         *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
1929         *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
1930         *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
1931 }
1932
1933 /* Calculate and set validation bytes for task context */
1934 void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
1935                                     u32 tid)
1936 {
1937         u8 *p_ctx, *region1_val_ptr;
1938
1939         p_ctx = (u8 *)p_ctx_mem;
1940         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1941
1942         OSAL_MEMSET(p_ctx, 0, ctx_size);
1943
1944         *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
1945 }
1946
1947 /* Memset session context to 0 while preserving validation bytes */
1948 void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1949 {
1950         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1951         u8 x_val, t_val, u_val;
1952
1953         p_ctx = (u8 *)p_ctx_mem;
1954         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1955         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1956         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1957
1958         x_val = *x_val_ptr;
1959         t_val = *t_val_ptr;
1960         u_val = *u_val_ptr;
1961
1962         OSAL_MEMSET(p_ctx, 0, ctx_size);
1963
1964         *x_val_ptr = x_val;
1965         *t_val_ptr = t_val;
1966         *u_val_ptr = u_val;
1967 }
1968
1969 /* Memset task context to 0 while preserving validation bytes */
1970 void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1971 {
1972         u8 *p_ctx, *region1_val_ptr;
1973         u8 region1_val;
1974
1975         p_ctx = (u8 *)p_ctx_mem;
1976         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1977
1978         region1_val = *region1_val_ptr;
1979
1980         OSAL_MEMSET(p_ctx, 0, ctx_size);
1981
1982         *region1_val_ptr = region1_val;
1983 }
1984
1985 /* Enable and configure context validation */
1986 void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
1987                                      struct ecore_ptt *p_ptt)
1988 {
1989         u32 ctx_validation;
1990
1991         /* Enable validation for connection region 3 - bits [31:24] */
1992         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
1993         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
1994
1995         /* Enable validation for connection region 5 - bits [15: 8] */
1996         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1997         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
1998
1999         /* Enable validation for connection region 1 - bits [15: 8] */
2000         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
2001         ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
2002 }
2003
2004
2005 /*******************************************************************************
2006  * File name : rdma_init.c
2007  * Author    : Michael Shteinbok
2008  *******************************************************************************
2009  *******************************************************************************
2010  * Description:
2011  * RDMA HSI functions
2012  *
2013  *******************************************************************************
2014  * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
2015  *
2016  *******************************************************************************
2017  */
2018 static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
2019                                           u8 storm_id)
2020 {
2021         switch (storm_id) {
2022         case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2023                        TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2024         case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2025                        MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2026         case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2027                        USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2028         case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2029                        XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2030         case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2031                        YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2032         case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2033                        PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
2034
2035         default: return 0;
2036         }
2037 }
2038
2039 void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
2040                                 struct ecore_ptt *p_ptt,
2041                                 u8 assert_level[NUM_STORMS])
2042 {
2043         u8 storm_id;
2044         for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
2045                 u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);
2046
2047                 ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
2048         }
2049 }