net/qede/base: upgrade FW to 8.33.12.0
[dpdk.git] / drivers / net / qede / base / ecore_init_fw_funcs.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore_hw.h"
11 #include "ecore_init_ops.h"
12 #include "reg_addr.h"
13 #include "ecore_rt_defs.h"
14 #include "ecore_hsi_common.h"
15 #include "ecore_hsi_init_func.h"
16 #include "ecore_hsi_eth.h"
17 #include "ecore_hsi_init_tool.h"
18 #include "ecore_iro.h"
19 #include "ecore_init_fw_funcs.h"
20
21 #define CDU_VALIDATION_DEFAULT_CFG 61
22
23 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
24         { 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */
25         { 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */
26         { 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */
27 };
28 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
29         { 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
30 };
31
32 /* General constants */
33 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
34                                 QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
35 #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
36                                   0)
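/* Illustrative sizing example (not part of the interface): for a PQ covering
 * 1024 CIDs, QM_PQ_SIZE_256B(1024) = DIV_ROUND_UP(1024, 0x100) - 1 = 3, and
 * QM_PQ_MEM_4KB(1024) = DIV_ROUND_UP(1025 * QM_PQ_ELEMENT_SIZE, 0x1000) 4KB
 * pages (2 pages assuming a 4-byte QM_PQ_ELEMENT_SIZE).
 */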
37 #define QM_INVALID_PQ_ID                0xffff
38
39 /* Feature enable */
40 #define QM_BYPASS_EN                    1
41 #define QM_BYTE_CRD_EN                  1
42
43 /* Other PQ constants */
44 #define QM_OTHER_PQS_PER_PF             4
45
46 /* VOQ constants */
47 #define QM_E5_NUM_EXT_VOQ               (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
48
49 /* WFQ constants: */
50
51 /* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
52 #define QM_WFQ_UPPER_BOUND              62500000
53
54 /* Bit of VOQ in WFQ VP PQ map */
55 #define QM_WFQ_VP_PQ_VOQ_SHIFT          0
56
57 /* Bit of PF in WFQ VP PQ map */
58 #define QM_WFQ_VP_PQ_PF_E4_SHIFT        5
59 #define QM_WFQ_VP_PQ_PF_E5_SHIFT        6
60
61 /* 0x9000 = 4*9*1024 */
62 #define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
63
64 /* Max WFQ increment value is 0.7 * upper bound */
65 #define QM_WFQ_MAX_INC_VAL              ((QM_WFQ_UPPER_BOUND * 7) / 10)
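/* Worked example (illustrative): a WFQ weight of 1 gives an increment of
 * 0x9000 = 36864, so with QM_WFQ_MAX_INC_VAL = 0.7 * 62500000 = 43750000 the
 * largest weight accepted by the WFQ init routines below is
 * 43750000 / 36864 ~= 1186.
 */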
66
67 /* Number of VOQs in E5 QmWfqCrd register */
68 #define QM_WFQ_CRD_E5_NUM_VOQS          16
69
70 /* RL constants: */
71
72 /* Period in us */
73 #define QM_RL_PERIOD                    5
74
75 /* Period in 25MHz cycles */
76 #define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
77
78 /* RL increment value - rate is specified in Mbps. The factor of 1.01 was
79  * added after observing that only a 99% rate was reached on a 25Gbps port
80  * in a DPDK RFC 2544 test. In this scenario the PF RL reduced the line rate
81  * to 99% even though the credit increment value was correct and the FW
82  * calculated correct packet sizes. The reason for the RL inaccuracy is
83  * unknown at this point.
84  */
85 #define QM_RL_INC_VAL(rate) \
86         OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
87         (8 * 100)), 1)
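/* Worked example (illustrative): for a 25Gbps (25000 Mbps) rate limit,
 * QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / 800 = 15781 credit units per
 * QM_RL_PERIOD of 5 us, the extra ~1% coming from the factor above.
 */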
88
89 /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
90 #define QM_PF_RL_UPPER_BOUND            62500000
91
92 /* Max PF RL increment value is 0.7 * upper bound */
93 #define QM_PF_RL_MAX_INC_VAL            ((QM_PF_RL_UPPER_BOUND * 7) / 10)
94
95 /* Vport RL Upper bound, link speed is in Mbps */
96 #define QM_VP_RL_UPPER_BOUND(speed) \
97         ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
98
99 /* Max Vport RL increment value is the Vport RL upper bound */
100 #define QM_VP_RL_MAX_INC_VAL(speed)     QM_VP_RL_UPPER_BOUND(speed)
101
102 /* Vport RL credit threshold in case of QM bypass */
103 #define QM_VP_RL_BYPASS_THRESH_SPEED    (QM_VP_RL_UPPER_BOUND(10000) - 1)
104
105 /* AFullOprtnstcCrdMask constants */
106 #define QM_OPPOR_LINE_VOQ_DEF           1
107 #define QM_OPPOR_FW_STOP_DEF            0
108 #define QM_OPPOR_PQ_EMPTY_DEF           1
109
110 /* Command Queue constants: */
111
112 /* Pure LB CmdQ lines (+spare) */
113 #define PBF_CMDQ_PURE_LB_LINES          150
114
115 #define PBF_CMDQ_LINES_E5_RSVD_RATIO    8
116
117 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
118         (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
119          ext_voq * \
120          (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
121           PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
122
123 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
124         (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
125          ext_voq * \
126          (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
127           PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
128
129 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
130 ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
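/* Illustrative example: for the default pure LB allocation of 150 CmdQ lines,
 * QM_VOQ_LINE_CRD(150) yields ((150 - 4) * 2) = 292 line credits, OR'd with
 * QM_LINE_CRD_REG_SIGN_BIT.
 */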
131
132 /* BTB: block constants (block size = 256B) */
133
134 /* 256B blocks in 9700B packet */
135 #define BTB_JUMBO_PKT_BLOCKS            38
136
137 /* Headroom per-port */
138 #define BTB_HEADROOM_BLOCKS             BTB_JUMBO_PKT_BLOCKS
139 #define BTB_PURE_LB_FACTOR              10
140
141 /* Factored (hence really 0.7) */
142 #define BTB_PURE_LB_RATIO               7
143
144 /* QM stop command constants */
145 #define QM_STOP_PQ_MASK_WIDTH           32
146 #define QM_STOP_CMD_ADDR                2
147 #define QM_STOP_CMD_STRUCT_SIZE         2
148 #define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
149 #define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
150 #define QM_STOP_CMD_PAUSE_MASK_MASK     0xffffffff /* @DPDK */
151 #define QM_STOP_CMD_GROUP_ID_OFFSET     1
152 #define QM_STOP_CMD_GROUP_ID_SHIFT      16
153 #define QM_STOP_CMD_GROUP_ID_MASK       15
154 #define QM_STOP_CMD_PQ_TYPE_OFFSET      1
155 #define QM_STOP_CMD_PQ_TYPE_SHIFT       24
156 #define QM_STOP_CMD_PQ_TYPE_MASK        1
157 #define QM_STOP_CMD_MAX_POLL_COUNT      100
158 #define QM_STOP_CMD_POLL_PERIOD_US      500
159
160 /* QM command macros */
161 #define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
162 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
163         SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
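/* Expansion example (for reference): QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
 * PQ_TYPE, 1) becomes SET_FIELD(cmd_arr[QM_STOP_CMD_PQ_TYPE_OFFSET],
 * QM_STOP_CMD_PQ_TYPE, 1), i.e. it sets the 1-bit PQ_TYPE field at bit 24 of
 * cmd_arr[1] (see the QM stop command constants above).
 */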
164
165 #define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
166                           vp_pq_id, rl_id, ext_voq, wrr) \
167         do {                                            \
168                 OSAL_MEMSET(&map, 0, sizeof(map)); \
169                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
170                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
171                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
172                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
173                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
174                 SET_FIELD(map.reg, \
175                           QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
176                 STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
177                              *((u32 *)&map)); \
178         } while (0)
179
180 #define WRITE_PQ_INFO_TO_RAM            1
181 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
182         (((vp) << 0) | ((pf) << 12) | ((tc) << 16) |    \
183          ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
184 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
185         (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
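/* Layout implied by the shifts in PQ_INFO_ELEMENT (for reference): vp in bits
 * 0-11, pf in 12-15, tc in 16-19, port in 20-21, rl_valid in bit 22 and rl
 * from bit 24. PQ_INFO_RAM_GRC_ADDRESS(10), for example, resolves to
 * XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + 40.
 */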
186
187 /******************** INTERNAL IMPLEMENTATION *********************/
188
189 /* Returns the external VOQ number */
190 static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
191                             u8 port_id,
192                             u8 tc,
193                             u8 max_phys_tcs_per_port)
194 {
195         if (tc == PURE_LB_TC)
196                 return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
197         else
198                 return port_id * (max_phys_tcs_per_port) + tc;
199 }
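/* Example (illustrative): with max_phys_tcs_per_port = 4, port 1 / TC 2 maps
 * to VOQ 1 * 4 + 2 = 6, while the pure LB TC of any port maps to
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id.
 */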
200
201 /* Prepare PF RL enable/disable runtime init values */
202 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
203 {
204         STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
205         if (pf_rl_en) {
206                 u8 num_ext_voqs = MAX_NUM_VOQS_E4;
207                 u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
208
209                 /* Enable RLs for all VOQs */
210                 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
211                              (u32)voq_bit_mask);
212 #ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
213                 if (num_ext_voqs >= 32)
214                         STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
215                                      (u32)(voq_bit_mask >> 32));
216 #endif
217
218                 /* Write RL period */
219                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
220                              QM_RL_PERIOD_CLK_25M);
221                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
222                              QM_RL_PERIOD_CLK_25M);
223
224                 /* Set credit threshold for QM bypass flow */
225                 if (QM_BYPASS_EN)
226                         STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
227                                      QM_PF_RL_UPPER_BOUND);
228         }
229 }
230
231 /* Prepare PF WFQ enable/disable runtime init values */
232 static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
233 {
234         STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
235
236         /* Set credit threshold for QM bypass flow */
237         if (pf_wfq_en && QM_BYPASS_EN)
238                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
239                              QM_WFQ_UPPER_BOUND);
240 }
241
242 /* Prepare VPORT RL enable/disable runtime init values */
243 static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
244 {
245         STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
246                      vport_rl_en ? 1 : 0);
247         if (vport_rl_en) {
248                 /* Write RL period (use timer 0 only) */
249                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
250                              QM_RL_PERIOD_CLK_25M);
251                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
252                              QM_RL_PERIOD_CLK_25M);
253
254                 /* Set credit threshold for QM bypass flow */
255                 if (QM_BYPASS_EN)
256                         STORE_RT_REG(p_hwfn,
257                                      QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
258                                      QM_VP_RL_BYPASS_THRESH_SPEED);
259         }
260 }
261
262 /* Prepare VPORT WFQ enable/disable runtime init values */
263 static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
264 {
265         STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
266                      vport_wfq_en ? 1 : 0);
267
268         /* Set credit threshold for QM bypass flow */
269         if (vport_wfq_en && QM_BYPASS_EN)
270                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
271                              QM_WFQ_UPPER_BOUND);
272 }
273
274 /* Prepare runtime init values to allocate PBF command queue lines for
275  * the specified VOQ
276  */
277 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
278                                          u8 ext_voq,
279                                          u16 cmdq_lines)
280 {
281         u32 qm_line_crd;
282
283         qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
284
285         OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
286                          (u32)cmdq_lines);
287         STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
288                          qm_line_crd);
289         STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
290                          qm_line_crd);
291 }
292
293 /* Prepare runtime init values to allocate PBF command queue lines. */
294 static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
295                                      u8 max_ports_per_engine,
296                                      u8 max_phys_tcs_per_port,
297                                      struct init_qm_port_params
298                                      port_params[MAX_NUM_PORTS])
299 {
300         u8 tc, ext_voq, port_id, num_tcs_in_port;
301         u8 num_ext_voqs = MAX_NUM_VOQS_E4;
302
303         /* Clear PBF lines of all VOQs */
304         for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
305                 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
306
307         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
308                 u16 phys_lines, phys_lines_per_tc;
309
310                 if (!port_params[port_id].active)
311                         continue;
312
313                 /* Find number of command queue lines to divide between the
314                  * active physical TCs. In E5, 1/8 of the lines are
315                  * reserved. The lines for the pure LB TC are subtracted.
316                  */
317                 phys_lines = port_params[port_id].num_pbf_cmd_lines;
318                 phys_lines -= PBF_CMDQ_PURE_LB_LINES;
319
320                 /* Find #lines per active physical TC */
321                 num_tcs_in_port = 0;
322                 for (tc = 0; tc < max_phys_tcs_per_port; tc++)
323                         if (((port_params[port_id].active_phys_tcs >> tc) &
324                               0x1) == 1)
325                                 num_tcs_in_port++;
326                 phys_lines_per_tc = phys_lines / num_tcs_in_port;
327
328                 /* Init registers per active TC */
329                 for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
330                         ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
331                                                     max_phys_tcs_per_port);
332                         if (((port_params[port_id].active_phys_tcs >> tc) &
333                             0x1) == 1)
334                                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
335                                                              phys_lines_per_tc);
336                 }
337
338                 /* Init registers for pure LB TC */
339                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
340                                             max_phys_tcs_per_port);
341                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
342                                              PBF_CMDQ_PURE_LB_LINES);
343         }
344 }
345
346 /*
347  * Prepare runtime init values to allocate guaranteed BTB blocks for the
348  * specified port. The guaranteed BTB space is divided between the TCs as
349  * follows (shared space is currently not used):
350  * 1. Parameters:
351  *     B - BTB blocks for this port
352  *     C - Number of physical TCs for this port
353  * 2. Calculation:
354  *     a. 38 blocks (9700B jumbo frame) are allocated for global per-port
355  *        headroom.
356  *     b. B = B - 38 (remainder after global headroom allocation).
357  *     c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
358  *     d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
359  *     e. B/C blocks are allocated for each physical TC.
360  * Assumptions:
361  * - MTU is up to 9700 bytes (38 blocks)
362  * - All TCs are considered symmetrical (same rate and packet size)
363  * - No optimization for lossy TC (all are considered lossless). Shared space
364  *   is not enabled and allocated for each TC.
365  */
366 static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
367                                      u8 max_ports_per_engine,
368                                      u8 max_phys_tcs_per_port,
369                                      struct init_qm_port_params
370                                      port_params[MAX_NUM_PORTS])
371 {
372         u32 usable_blocks, pure_lb_blocks, phys_blocks;
373         u8 tc, ext_voq, port_id, num_tcs_in_port;
374
375         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
376                 if (!port_params[port_id].active)
377                         continue;
378
379                 /* Subtract headroom blocks */
380                 usable_blocks = port_params[port_id].num_btb_blocks -
381                                 BTB_HEADROOM_BLOCKS;
382
383                 /* Find blocks per physical TC. Use a factor to avoid
384                  * floating-point arithmetic.
385                  */
386                 num_tcs_in_port = 0;
387                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
388                         if (((port_params[port_id].active_phys_tcs >> tc) &
389                               0x1) == 1)
390                                 num_tcs_in_port++;
391
392                 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
393                                   (num_tcs_in_port * BTB_PURE_LB_FACTOR +
394                                    BTB_PURE_LB_RATIO);
395                 pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
396                                             pure_lb_blocks /
397                                             BTB_PURE_LB_FACTOR);
398                 phys_blocks = (usable_blocks - pure_lb_blocks) /
399                               num_tcs_in_port;
400
401                 /* Init physical TCs */
402                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
403                         if (((port_params[port_id].active_phys_tcs >> tc) &
404                              0x1) == 1) {
405                                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
406                                                          max_phys_tcs_per_port);
407                                 STORE_RT_REG(p_hwfn,
408                                         PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
409                                         phys_blocks);
410                         }
411                 }
412
413                 /* Init pure LB TC */
414                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
415                                             max_phys_tcs_per_port);
416                 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
417                              pure_lb_blocks);
418         }
419 }
420
421 /* Prepare Tx PQ mapping runtime init values for the specified PF */
422 static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
423                                     struct ecore_ptt *p_ptt,
424                                     u8 pf_id,
425                                     u8 max_phys_tcs_per_port,
426                                                 bool is_pf_loading,
427                                     u32 num_pf_cids,
428                                     u32 num_vf_cids,
429                                     u16 start_pq,
430                                     u16 num_pf_pqs,
431                                     u16 num_vf_pqs,
432                                     u8 start_vport,
433                                     u32 base_mem_addr_4kb,
434                                     struct init_qm_pq_params *pq_params,
435                                     struct init_qm_vport_params *vport_params)
436 {
437         /* A bit per Tx PQ indicating if the PQ is associated with a VF */
438         u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
439         u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
440         u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
441         u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
442
443         num_pqs = num_pf_pqs + num_vf_pqs;
444
445         first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
446         last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
447
448         pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
449         vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
450         mem_addr_4kb = base_mem_addr_4kb;
451
452         /* Set mapping from PQ group to PF */
453         for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
454                 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
455                              (u32)(pf_id));
456
457         /* Set PQ sizes */
458         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
459                      QM_PQ_SIZE_256B(num_pf_cids));
460         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
461                      QM_PQ_SIZE_256B(num_vf_cids));
462
463         /* Go over all Tx PQs */
464         for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
465                 u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
466                 u8 ext_voq, vport_id_in_pf;
467                 bool is_vf_pq, rl_valid;
468                 u16 first_tx_pq_id;
469
470                 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
471                                             pq_params[i].tc_id,
472                                             max_phys_tcs_per_port);
473                 is_vf_pq = (i >= num_pf_pqs);
474                 rl_valid = pq_params[i].rl_valid > 0;
475
476                 /* Update first Tx PQ of VPORT/TC */
477                 vport_id_in_pf = pq_params[i].vport_id - start_vport;
478                 first_tx_pq_id =
479                 vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
480                 if (first_tx_pq_id == QM_INVALID_PQ_ID) {
481                         u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
482                                        (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
483
484                         /* Create new VP PQ */
485                         vport_params[vport_id_in_pf].
486                             first_tx_pq_id[pq_params[i].tc_id] = pq_id;
487                         first_tx_pq_id = pq_id;
488
489                         /* Map VP PQ to VOQ and PF */
490                         STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
491                                      first_tx_pq_id, map_val);
492                 }
493
494                 /* Check RL ID */
495                 if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
496                         DP_NOTICE(p_hwfn, true,
497                                   "Invalid VPORT ID for rate limiter config\n");
498                         rl_valid = false;
499                 }
500
501                 /* Prepare PQ map entry */
502                 struct qm_rf_pq_map_e4 tx_pq_map;
503
504                 QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
505                                   1 : 0,
506                                   first_tx_pq_id, rl_valid ?
507                                   pq_params[i].vport_id : 0,
508                                   ext_voq, pq_params[i].wrr_group);
509
510                 /* Set PQ base address */
511                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
512                              mem_addr_4kb);
513
514                 /* Clear PQ pointer table entry (64 bit) */
515                 if (is_pf_loading)
516                         for (j = 0; j < 2; j++)
517                                 STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
518                                              (pq_id * 2) + j, 0);
519
520                 /* Write PQ info to RAM */
521                 if (WRITE_PQ_INFO_TO_RAM != 0) {
522                         u32 pq_info = 0;
523
524                         pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
525                                                   pq_params[i].tc_id,
526                                                   pq_params[i].port_id,
527                                                   rl_valid ? 1 : 0, rl_valid ?
528                                                   pq_params[i].vport_id : 0);
529                         ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
530                                  pq_info);
531                 }
532
533                 /* If VF PQ, add indication to PQ VF mask */
534                 if (is_vf_pq) {
535                         tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
536                                 (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
537                         mem_addr_4kb += vport_pq_mem_4kb;
538                 } else {
539                         mem_addr_4kb += pq_mem_4kb;
540                 }
541         }
542
543         /* Store Tx PQ VF mask to size select register */
544         for (i = 0; i < num_tx_pq_vf_masks; i++)
545                 if (tx_pq_vf_mask[i])
546                         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
547                                      i, tx_pq_vf_mask[i]);
548 }
549
550 /* Prepare Other PQ mapping runtime init values for the specified PF */
551 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
552                                        u8 pf_id,
553                                        bool is_pf_loading,
554                                        u32 num_pf_cids,
555                                        u32 num_tids,
556                                        u32 base_mem_addr_4kb)
557 {
558         u32 pq_size, pq_mem_4kb, mem_addr_4kb;
559         u16 i, j, pq_id, pq_group;
560
561         /* A single other PQ group is used in each PF, where PQ group i is used
562          * in PF i.
563          */
564         pq_group = pf_id;
565         pq_size = num_pf_cids + num_tids;
566         pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
567         mem_addr_4kb = base_mem_addr_4kb;
568
569         /* Map PQ group to PF */
570         STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
571                      (u32)(pf_id));
572
573         /* Set PQ sizes */
574         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
575                      QM_PQ_SIZE_256B(pq_size));
576
577         for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
578              i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
579                 /* Set PQ base address */
580                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
581                              mem_addr_4kb);
582
583                 /* Clear PQ pointer table entry */
584                 if (is_pf_loading)
585                         for (j = 0; j < 2; j++)
586                                 STORE_RT_REG(p_hwfn,
587                                              QM_REG_PTRTBLOTHER_RT_OFFSET +
588                                              (pq_id * 2) + j, 0);
589
590                 mem_addr_4kb += pq_mem_4kb;
591         }
592 }
593
594 /* Prepare PF WFQ runtime init values for the specified PF.
595  * Return -1 on error.
596  */
597 static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
598                                 u8 pf_id,
599                                 u16 pf_wfq,
600                                 u8 max_phys_tcs_per_port,
601                                 u16 num_tx_pqs,
602                                 struct init_qm_pq_params *pq_params)
603 {
604         u32 inc_val, crd_reg_offset;
605         u8 ext_voq;
606         u16 i;
607
608         inc_val = QM_WFQ_INC_VAL(pf_wfq);
609         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
610                 DP_NOTICE(p_hwfn, true,
611                           "Invalid PF WFQ weight configuration\n");
612                 return -1;
613         }
614
615         for (i = 0; i < num_tx_pqs; i++) {
616                 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
617                                             pq_params[i].tc_id,
618                                             max_phys_tcs_per_port);
619                 crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
620                                   QM_REG_WFQPFCRD_RT_OFFSET :
621                                   QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
622                                  ext_voq * MAX_NUM_PFS_BB +
623                                  (pf_id % MAX_NUM_PFS_BB);
624                 OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
625                                  (u32)QM_WFQ_CRD_REG_SIGN_BIT);
626         }
627
628         STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
629                      pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
630         STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
631
632         return 0;
633 }
634
635 /* Prepare PF RL runtime init values for the specified PF.
636  * Return -1 on error.
637  */
638 static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
639 {
640         u32 inc_val;
641
642         inc_val = QM_RL_INC_VAL(pf_rl);
643         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
644                 DP_NOTICE(p_hwfn, true,
645                           "Invalid PF rate limit configuration\n");
646                 return -1;
647         }
648
649         STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
650                      (u32)QM_RL_CRD_REG_SIGN_BIT);
651         STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
652                      QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
653         STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
654
655         return 0;
656 }
657
658 /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
659  * Return -1 on error.
660  */
661 static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
662                                 u8 num_vports,
663                                 struct init_qm_vport_params *vport_params)
664 {
665         u16 vport_pq_id;
666         u32 inc_val;
667         u8 tc, i;
668
669         /* Go over all PF VPORTs */
670         for (i = 0; i < num_vports; i++) {
671                 if (!vport_params[i].vport_wfq)
672                         continue;
673
674                 inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
675                 if (inc_val > QM_WFQ_MAX_INC_VAL) {
676                         DP_NOTICE(p_hwfn, true,
677                                   "Invalid VPORT WFQ weight configuration\n");
678                         return -1;
679                 }
680
681                 /* Each VPORT can have several VPORT PQ IDs for various TCs */
682                 for (tc = 0; tc < NUM_OF_TCS; tc++) {
683                         vport_pq_id = vport_params[i].first_tx_pq_id[tc];
684                         if (vport_pq_id != QM_INVALID_PQ_ID) {
685                                 STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
686                                              vport_pq_id,
687                                              (u32)QM_WFQ_CRD_REG_SIGN_BIT);
688                                 STORE_RT_REG(p_hwfn,
689                                              QM_REG_WFQVPWEIGHT_RT_OFFSET +
690                                              vport_pq_id, inc_val);
691                         }
692                 }
693         }
694         return 0;
695 }
696
697 /* Prepare VPORT RL runtime init values for the specified VPORTs.
698  * Return -1 on error.
699  */
700 static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
701                                   u8 start_vport,
702                                   u8 num_vports,
703                                   u32 link_speed,
704                                   struct init_qm_vport_params *vport_params)
705 {
706         u8 i, vport_id;
707         u32 inc_val;
708
709         if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
710                 DP_NOTICE(p_hwfn, true,
711                           "Invalid VPORT ID for rate limiter configuration\n");
712                 return -1;
713         }
714
715         /* Go over all PF VPORTs */
716         for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
717                 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
718                           vport_params[i].vport_rl : link_speed);
719                 if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
720                         DP_NOTICE(p_hwfn, true,
721                                   "Invalid VPORT rate-limit configuration\n");
722                         return -1;
723                 }
724
725                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
726                              (u32)QM_RL_CRD_REG_SIGN_BIT);
727                 STORE_RT_REG(p_hwfn,
728                              QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
729                              QM_VP_RL_UPPER_BOUND(link_speed) |
730                              (u32)QM_RL_CRD_REG_SIGN_BIT);
731                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
732                              inc_val);
733         }
734
735         return 0;
736 }
737
738 static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
739                                        struct ecore_ptt *p_ptt)
740 {
741         u32 reg_val, i;
742
743         for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
744              i++) {
745                 OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
746                 reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
747         }
748
749         /* Check if timeout while waiting for SDM command ready */
750         if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
751                 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
752                            "Timeout waiting for QM SDM cmd ready signal\n");
753                 return false;
754         }
755
756         return true;
757 }
758
759 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
760                               struct ecore_ptt *p_ptt,
761                                                           u32 cmd_addr,
762                                                           u32 cmd_data_lsb,
763                                                           u32 cmd_data_msb)
764 {
765         if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
766                 return false;
767
768         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
769         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
770         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
771         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
772         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
773
774         return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
775 }
776
777
778 /******************** INTERFACE IMPLEMENTATION *********************/
779
780 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
781                                                  u32 num_vf_cids,
782                                                  u32 num_tids,
783                                                  u16 num_pf_pqs,
784                                                  u16 num_vf_pqs)
785 {
786         return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
787             QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
788             QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
789 }
790
791 int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
792                             u8 max_ports_per_engine,
793                             u8 max_phys_tcs_per_port,
794                             bool pf_rl_en,
795                             bool pf_wfq_en,
796                             bool vport_rl_en,
797                             bool vport_wfq_en,
798                             struct init_qm_port_params
799                             port_params[MAX_NUM_PORTS])
800 {
801         u32 mask;
802
803         /* Init AFullOprtnstcCrdMask */
804         mask = (QM_OPPOR_LINE_VOQ_DEF <<
805                 QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
806                 (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
807                 (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
808                 (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
809                 (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
810                 (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
811                 (QM_OPPOR_FW_STOP_DEF <<
812                  QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
813                 (QM_OPPOR_PQ_EMPTY_DEF <<
814                  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
815         STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
816
817         /* Enable/disable PF RL */
818         ecore_enable_pf_rl(p_hwfn, pf_rl_en);
819
820         /* Enable/disable PF WFQ */
821         ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
822
823         /* Enable/disable VPORT RL */
824         ecore_enable_vport_rl(p_hwfn, vport_rl_en);
825
826         /* Enable/disable VPORT WFQ */
827         ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
828
829         /* Init PBF CMDQ line credit */
830         ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
831                                  max_phys_tcs_per_port, port_params);
832
833         /* Init BTB blocks in PBF */
834         ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
835                                  max_phys_tcs_per_port, port_params);
836
837         return 0;
838 }
839
840 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
841                         struct ecore_ptt *p_ptt,
842                         u8 pf_id,
843                         u8 max_phys_tcs_per_port,
844                         bool is_pf_loading,
845                         u32 num_pf_cids,
846                         u32 num_vf_cids,
847                         u32 num_tids,
848                         u16 start_pq,
849                         u16 num_pf_pqs,
850                         u16 num_vf_pqs,
851                         u8 start_vport,
852                         u8 num_vports,
853                         u16 pf_wfq,
854                         u32 pf_rl,
855                         u32 link_speed,
856                         struct init_qm_pq_params *pq_params,
857                         struct init_qm_vport_params *vport_params)
858 {
859         u32 other_mem_size_4kb;
860         u8 tc, i;
861
862         other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
863                              QM_OTHER_PQS_PER_PF;
864
865         /* Clear first Tx PQ ID array for each VPORT */
866         for (i = 0; i < num_vports; i++)
867                 for (tc = 0; tc < NUM_OF_TCS; tc++)
868                         vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
869
870         /* Map Other PQs (if any) */
871 #if QM_OTHER_PQS_PER_PF > 0
872         ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
873                                    num_tids, 0);
874 #endif
875
876         /* Map Tx PQs */
877         ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
878                                 is_pf_loading, num_pf_cids, num_vf_cids,
879                                 start_pq, num_pf_pqs, num_vf_pqs, start_vport,
880                                 other_mem_size_4kb, pq_params, vport_params);
881
882         /* Init PF WFQ */
883         if (pf_wfq)
884                 if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
885                                          max_phys_tcs_per_port,
886                                          num_pf_pqs + num_vf_pqs, pq_params))
887                         return -1;
888
889         /* Init PF RL */
890         if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
891                 return -1;
892
893         /* Set VPORT WFQ */
894         if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
895                 return -1;
896
897         /* Set VPORT RL */
898         if (ecore_vport_rl_rt_init
899             (p_hwfn, start_vport, num_vports, link_speed, vport_params))
900                 return -1;
901
902         return 0;
903 }
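/* Typical call order (a sketch, not mandated by this file): the engine-wide
 * ecore_qm_common_rt_init() runs once with the per-port parameters, and
 * ecore_qm_pf_rt_init() then runs once per PF with its PQ/VPORT parameters:
 *
 *   ecore_qm_common_rt_init(p_hwfn, max_ports_per_engine,
 *                           max_phys_tcs_per_port, pf_rl_en, pf_wfq_en,
 *                           vport_rl_en, vport_wfq_en, port_params);
 *   ecore_qm_pf_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
 *                       is_pf_loading, num_pf_cids, num_vf_cids, num_tids,
 *                       start_pq, num_pf_pqs, num_vf_pqs, start_vport,
 *                       num_vports, pf_wfq, pf_rl, link_speed, pq_params,
 *                       vport_params);
 */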
904
905 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
906                       struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
907 {
908         u32 inc_val;
909
910         inc_val = QM_WFQ_INC_VAL(pf_wfq);
911         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
912                 DP_NOTICE(p_hwfn, true,
913                           "Invalid PF WFQ weight configuration\n");
914                 return -1;
915         }
916
917         ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
918
919         return 0;
920 }
921
922 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
923                      struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
924 {
925         u32 inc_val;
926
927         inc_val = QM_RL_INC_VAL(pf_rl);
928         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
929                 DP_NOTICE(p_hwfn, true,
930                           "Invalid PF rate limit configuration\n");
931                 return -1;
932         }
933
934         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
935                  (u32)QM_RL_CRD_REG_SIGN_BIT);
936         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
937
938         return 0;
939 }
940
941 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
942                          struct ecore_ptt *p_ptt,
943                          u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
944 {
945         u16 vport_pq_id;
946         u32 inc_val;
947         u8 tc;
948
949         inc_val = QM_WFQ_INC_VAL(vport_wfq);
950         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
951                 DP_NOTICE(p_hwfn, true,
952                           "Invalid VPORT WFQ weight configuration\n");
953                 return -1;
954         }
955
956         for (tc = 0; tc < NUM_OF_TCS; tc++) {
957                 vport_pq_id = first_tx_pq_id[tc];
958                 if (vport_pq_id != QM_INVALID_PQ_ID) {
959                         ecore_wr(p_hwfn, p_ptt,
960                                  QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
961                 }
962         }
963
964         return 0;
965 }
966
967 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
968                         struct ecore_ptt *p_ptt, u8 vport_id,
969                                                 u32 vport_rl,
970                                                 u32 link_speed)
971 {
972         u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
973
974         if (vport_id >= max_qm_global_rls) {
975                 DP_NOTICE(p_hwfn, true,
976                           "Invalid VPORT ID for rate limiter configuration\n");
977                 return -1;
978         }
979
980         inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
981         if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
982                 DP_NOTICE(p_hwfn, true,
983                           "Invalid VPORT rate-limit configuration\n");
984                 return -1;
985         }
986
987         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
988                  (u32)QM_RL_CRD_REG_SIGN_BIT);
989         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
990
991         return 0;
992 }
993
994 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
995                             struct ecore_ptt *p_ptt,
996                             bool is_release_cmd,
997                             bool is_tx_pq, u16 start_pq, u16 num_pqs)
998 {
999         u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
1000         u32 pq_mask = 0, last_pq, pq_id;
1001
1002         last_pq = start_pq + num_pqs - 1;
1003
1004         /* Set command's PQ type */
1005         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
1006
1007         /* Go over requested PQs */
1008         for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
1009                 /* Set PQ bit in mask (stop command only) */
1010                 if (!is_release_cmd)
1011                         pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
1012
1013                 /* If last PQ or end of PQ mask, write command */
1014                 if ((pq_id == last_pq) ||
1015                     (pq_id % QM_STOP_PQ_MASK_WIDTH ==
1016                     (QM_STOP_PQ_MASK_WIDTH - 1))) {
1017                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
1018                                          pq_mask);
1019                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
1020                                          pq_id / QM_STOP_PQ_MASK_WIDTH);
1021                         if (!ecore_send_qm_cmd
1022                             (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
1023                              cmd_arr[1]))
1024                                 return false;
1025                         pq_mask = 0;
1026                 }
1027         }
1028
1029         return true;
1030 }
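/* Usage sketch (illustrative, assumes valid p_hwfn/p_ptt handles): pause the
 * 96 Tx PQs starting at PQ 0, and later release them again. Each call returns
 * false if the QM SDM command interface times out:
 *
 *   if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 96))
 *           return false;
 *   ...
 *   if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 96))
 *           return false;
 */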
1031
1032
1033 /* NIG: ETS configuration constants */
1034 #define NIG_TX_ETS_CLIENT_OFFSET        4
1035 #define NIG_LB_ETS_CLIENT_OFFSET        1
1036 #define NIG_ETS_MIN_WFQ_BYTES           1600
1037
1038 /* NIG: ETS constants */
1039 #define NIG_ETS_UP_BOUND(weight, mtu) \
1040         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1041
1042 /* NIG: RL constants */
1043
1044 /* Byte base type value */
1045 #define NIG_RL_BASE_TYPE                1
1046
1047 /* Period in us */
1048 #define NIG_RL_PERIOD                   1
1049
1050 /* Period in 25MHz cycles */
1051 #define NIG_RL_PERIOD_CLK_25M           (25 * NIG_RL_PERIOD)
1052
1053 /* Rate in mbps */
1054 #define NIG_RL_INC_VAL(rate)            (((rate) * NIG_RL_PERIOD) / 8)
1055
1056 #define NIG_RL_MAX_VAL(inc_val, mtu) \
1057         (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
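/* Worked example (illustrative): a 10000 Mbps rate gives
 * NIG_RL_INC_VAL(10000) = 10000 / 8 = 1250 bytes per NIG_RL_PERIOD of 1 us,
 * and with a 9600 byte MTU, NIG_RL_MAX_VAL(1250, 9600) = 2 * 9600 = 19200.
 */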
1058
1059 /* NIG: packet priority configuration constants */
1060 #define NIG_PRIORITY_MAP_TC_BITS        4
1061
1062
1063 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
1064                         struct ecore_ptt *p_ptt,
1065                         struct init_ets_req *req, bool is_lb)
1066 {
1067         u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
1068         u32 tc_bound_base_addr, tc_bound_addr_diff;
1069         u8 sp_tc_map = 0, wfq_tc_map = 0;
1070         u8 tc, num_tc, tc_client_offset;
1071
1072         num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
1073         tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
1074                                    NIG_TX_ETS_CLIENT_OFFSET;
1075         min_weight = 0xffffffff;
1076         tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1077                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1078         tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
1079                                       NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1080                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
1081                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1082         tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1083                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1084         tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
1085                                      NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1086                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
1087                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1088
1089         for (tc = 0; tc < num_tc; tc++) {
1090                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1091
1092                 /* Update SP map */
1093                 if (tc_req->use_sp)
1094                         sp_tc_map |= (1 << tc);
1095
1096                 if (!tc_req->use_wfq)
1097                         continue;
1098
1099                 /* Update WFQ map */
1100                 wfq_tc_map |= (1 << tc);
1101
1102                 /* Find minimal weight */
1103                 if (tc_req->weight < min_weight)
1104                         min_weight = tc_req->weight;
1105         }
1106
1107         /* Write SP map */
1108         ecore_wr(p_hwfn, p_ptt,
1109                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
1110                  NIG_REG_TX_ARB_CLIENT_IS_STRICT,
1111                  (sp_tc_map << tc_client_offset));
1112
1113         /* Write WFQ map */
1114         ecore_wr(p_hwfn, p_ptt,
1115                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
1116                  NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
1117                  (wfq_tc_map << tc_client_offset));
1118         /* write WFQ weights */
1119         for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
1120                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1121                 u32 byte_weight;
1122
1123                 if (!tc_req->use_wfq)
1124                         continue;
1125
1126                 /* Translate weight to bytes */
1127                 byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1128                               min_weight;
1129
1130                 /* Write WFQ weight */
1131                 ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
1132                          tc_weight_addr_diff * tc_client_offset, byte_weight);
1133
1134                 /* Write WFQ upper bound */
1135                 ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
1136                          tc_bound_addr_diff * tc_client_offset,
1137                          NIG_ETS_UP_BOUND(byte_weight, req->mtu));
1138         }
1139 }
1140
1141 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
1142                           struct ecore_ptt *p_ptt,
1143                           struct init_nig_lb_rl_req *req)
1144 {
1145         u32 ctrl, inc_val, reg_offset;
1146         u8 tc;
1147
1148         /* Disable global MAC+LB RL */
1149         ctrl =
1150             NIG_RL_BASE_TYPE <<
1151             NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
1152         ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1153
1154         /* Configure and enable global MAC+LB RL */
1155         if (req->lb_mac_rate) {
1156                 /* Configure  */
1157                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
1158                          NIG_RL_PERIOD_CLK_25M);
1159                 inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
1160                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
1161                          inc_val);
1162                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
1163                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1164
1165                 /* Enable */
1166                 ctrl |=
1167                     1 <<
1168                     NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
1169                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1170         }
1171
1172         /* Disable global LB-only RL */
1173         ctrl =
1174             NIG_RL_BASE_TYPE <<
1175             NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
1176         ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1177
1178         /* Configure and enable global LB-only RL */
1179         if (req->lb_rate) {
1180                 /* Configure  */
1181                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
1182                          NIG_RL_PERIOD_CLK_25M);
1183                 inc_val = NIG_RL_INC_VAL(req->lb_rate);
1184                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
1185                          inc_val);
1186                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
1187                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1188
1189                 /* Enable */
1190                 ctrl |=
1191                     1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
1192                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1193         }
1194
1195         /* Per-TC RLs */
1196         for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
1197              tc++, reg_offset += 4) {
1198                 /* Disable TC RL */
1199                 ctrl =
1200                     NIG_RL_BASE_TYPE <<
1201                 NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
1202                 ecore_wr(p_hwfn, p_ptt,
1203                          NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1204
1205                 /* Configure and enable TC RL */
1206                 if (!req->tc_rate[tc])
1207                         continue;
1208
1209                 /* Configure */
1210                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
1211                          reg_offset, NIG_RL_PERIOD_CLK_25M);
1212                 inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
1213                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
1214                          reg_offset, inc_val);
1215                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
1216                          reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
1217
1218                 /* Enable */
1219                 ctrl |= 1 <<
1220                         NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
1221                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
1222                          reg_offset, ctrl);
1223         }
1224 }
1225
1226 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
1227                                struct ecore_ptt *p_ptt,
1228                                struct init_nig_pri_tc_map_req *req)
1229 {
1230         u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
1231         u32 pri_tc_mask = 0;
1232         u8 pri, tc;
1233
1234         for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
1235                 if (!req->pri[pri].valid)
1236                         continue;
1237
1238                 pri_tc_mask |= (req->pri[pri].tc_id <<
1239                                 (pri * NIG_PRIORITY_MAP_TC_BITS));
1240                 tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
1241         }
1242
1243         /* Write priority -> TC mask */
1244         ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
1245
1246         /* Write TC -> priority mask */
1247         for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
1248                 ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
1249                          tc_pri_mask[tc]);
1250                 ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
1251                          tc_pri_mask[tc]);
1252         }
1253 }
1254
1255
1256 /* PRS: ETS configuration constants */
1257 #define PRS_ETS_MIN_WFQ_BYTES           1600
1258 #define PRS_ETS_UP_BOUND(weight, mtu) \
1259         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1260
1261
1262 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
1263                         struct ecore_ptt *p_ptt, struct init_ets_req *req)
1264 {
1265         u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
1266         u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
1267
1268         tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
1269                               PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
1270         tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
1271                              PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
1272
1273         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1274                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1275
1276                 /* Update SP map */
1277                 if (tc_req->use_sp)
1278                         sp_tc_map |= (1 << tc);
1279
1280                 if (!tc_req->use_wfq)
1281                         continue;
1282
1283                 /* Update WFQ map */
1284                 wfq_tc_map |= (1 << tc);
1285
1286                 /* Find minimal weight */
1287                 if (tc_req->weight < min_weight)
1288                         min_weight = tc_req->weight;
1289         }
1290
1291         /* write SP map */
1292         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
1293
1294         /* write WFQ map */
1295         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
1296                  wfq_tc_map);
1297
1298         /* write WFQ weights */
1299         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1300                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1301                 u32 byte_weight;
1302
1303                 if (!tc_req->use_wfq)
1304                         continue;
1305
1306                 /* Translate weight to bytes */
1307                 byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1308                               min_weight;
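                /* E.g. (illustrative): with TC weights 1 and 3, the weights
                 * programmed below are 1600 and 4800 bytes respectively, so
                 * the lowest-weight TC always gets PRS_ETS_MIN_WFQ_BYTES.
                 */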
1309
1310                 /* Write WFQ weight */
1311                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
1312                          tc_weight_addr_diff, byte_weight);
1313
1314                 /* Write WFQ upper bound */
1315                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
1316                          tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
1317                                                                    req->mtu));
1318         }
1319 }
1320
1321
1322 /* BRB: RAM configuration constants */
1323 #define BRB_TOTAL_RAM_BLOCKS_BB 4800
1324 #define BRB_TOTAL_RAM_BLOCKS_K2 5632
1325 #define BRB_BLOCK_SIZE          128
1326 #define BRB_MIN_BLOCKS_PER_TC   9
1327 #define BRB_HYST_BYTES          10240
1328 #define BRB_HYST_BLOCKS         (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
1329
1330 /* Temporary big RAM allocation - should be updated */
1331 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1332                         struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
1333 {
1334         u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1335         u32 active_port_blocks, reg_offset = 0;
1336         u8 port, active_ports = 0;
1337
1338         tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
1339                                                BRB_BLOCK_SIZE);
1340         min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
1341                                                 BRB_BLOCK_SIZE);
1342         total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
1343                                                     BRB_TOTAL_RAM_BLOCKS_BB;
1344
1345         /* Find number of active ports */
1346         for (port = 0; port < MAX_NUM_PORTS; port++)
1347                 if (req->num_active_tcs[port])
1348                         active_ports++;
1349
1350         active_port_blocks = (u32)(total_blocks / active_ports);
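        /* E.g. (illustrative): on BB, 4800 RAM blocks of 128B split between
         * two active ports gives each port 2400 blocks (307200 bytes). The
         * request is expected to have at least one port with active TCs,
         * otherwise this division is undefined.
         */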
1351
1352         for (port = 0; port < req->max_ports_per_engine; port++) {
1353                 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1354                 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1355                 u32 tc_guaranteed_blocks;
1356                 u8 tc;
1357
1358                 /* Calculate per-port sizes */
1359                 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
1360                                                          BRB_BLOCK_SIZE);
1361                 port_blocks = req->num_active_tcs[port] ? active_port_blocks :
1362                                                           0;
1363                 port_guaranteed_blocks = req->num_active_tcs[port] *
1364                                          tc_guaranteed_blocks;
1365                 port_shared_blocks = port_blocks - port_guaranteed_blocks;
1366                 full_xoff_th = req->num_active_tcs[port] *
1367                                BRB_MIN_BLOCKS_PER_TC;
1368                 full_xon_th = full_xoff_th + min_pkt_size_blocks;
1369                 pause_xoff_th = tc_headroom_blocks;
1370                 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
1371
1372                 /* Init total size per port */
1373                 ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
1374                          port_blocks);
1375
1376                 /* Init shared size per port */
1377                 ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
1378                          port_shared_blocks);
1379
1380                 for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1381                         /* Clear init values for non-active TCs */
1382                         if (tc == req->num_active_tcs[port]) {
1383                                 tc_guaranteed_blocks = 0;
1384                                 full_xoff_th = 0;
1385                                 full_xon_th = 0;
1386                                 pause_xoff_th = 0;
1387                                 pause_xon_th = 0;
1388                         }
1389
1390                         /* Init guaranteed size per TC */
1391                         ecore_wr(p_hwfn, p_ptt,
1392                                  BRB_REG_TC_GUARANTIED_0 + reg_offset,
1393                                  tc_guaranteed_blocks);
1394                         ecore_wr(p_hwfn, p_ptt,
1395                                  BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
1396                                  BRB_HYST_BLOCKS);
1397
1398                         /* Init pause/full thresholds per physical TC - for
1399                          * loopback traffic.
1400                          */
1401                         ecore_wr(p_hwfn, p_ptt,
1402                                  BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
1403                                  reg_offset, full_xoff_th);
1404                         ecore_wr(p_hwfn, p_ptt,
1405                                  BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
1406                                  reg_offset, full_xon_th);
1407                         ecore_wr(p_hwfn, p_ptt,
1408                                  BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
1409                                  reg_offset, pause_xoff_th);
1410                         ecore_wr(p_hwfn, p_ptt,
1411                                  BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
1412                                  reg_offset, pause_xon_th);
1413
1414                         /* Init pause/full thresholds per physical TC - for
1415                          * main traffic.
1416                          */
1417                         ecore_wr(p_hwfn, p_ptt,
1418                                  BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
1419                                  reg_offset, full_xoff_th);
1420                         ecore_wr(p_hwfn, p_ptt,
1421                                  BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
1422                                  reg_offset, full_xon_th);
1423                         ecore_wr(p_hwfn, p_ptt,
1424                                  BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
1425                                  reg_offset, pause_xoff_th);
1426                         ecore_wr(p_hwfn, p_ptt,
1427                                  BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
1428                                  reg_offset, pause_xon_th);
1429                 }
1430         }
1431 }
1432
1433 /* In MF mode, should be called once per port to set the OuterTag EtherType */
1434 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1435 {
1436         /* Update DORQ register */
1437         STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
1438 }
1439
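/* Single-bit read-modify-write helper: clear bit <offset> in <var>, then set
 * it again iff <enable> is true. Used below for the tunnel-type enable
 * registers.
 */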
1440 #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
1441 (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
1442 #define PRS_ETH_TUNN_OUTPUT_FORMAT        -188897008
1443 #define PRS_ETH_OUTPUT_FORMAT             -46832
1444
1445 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
1446                                struct ecore_ptt *p_ptt, u16 dest_port)
1447 {
1448         /* Update PRS register */
1449         ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1450
1451         /* Update NIG register */
1452         ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1453
1454         /* Update PBF register */
1455         ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1456 }
1457
1458 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
1459                             struct ecore_ptt *p_ptt, bool vxlan_enable)
1460 {
1461         u32 reg_val;
1462
1463         /* Update PRS register */
1464         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1465         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1466                            PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
1467                            vxlan_enable);
1468         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1469         if (reg_val) { /* TODO: handle E5 init */
1470                 reg_val = ecore_rd(p_hwfn, p_ptt,
1471                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1472
1473                 /* Update output format only if tunnel blocks not included */
1474                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1475                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1476                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1477         }
1478
1479         /* Update NIG register */
1480         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1481         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1482                                    NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
1483                                    vxlan_enable);
1484         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1485
1486         /* Update DORQ register */
1487         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
1488                  vxlan_enable ? 1 : 0);
1489 }
1490
1491 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
1492                           struct ecore_ptt *p_ptt,
1493                           bool eth_gre_enable, bool ip_gre_enable)
1494 {
1495         u32 reg_val;
1496
1497         /* Update PRS register */
1498         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1499         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1500                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
1501                    eth_gre_enable);
1502         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1503                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
1504                    ip_gre_enable);
1505         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1506         if (reg_val) { /* TODO: handle E5 init */
1507                 reg_val = ecore_rd(p_hwfn, p_ptt,
1508                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1509
1510                 /* Update output format only if tunnel blocks not included */
1511                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1512                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1513                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1514         }
1515
1516         /* Update NIG register */
1517         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1518         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1519                    NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
1520                    eth_gre_enable);
1521         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1522                    NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
1523                    ip_gre_enable);
1524         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1525
1526         /* Update DORQ registers */
1527         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
1528                  eth_gre_enable ? 1 : 0);
1529         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
1530                  ip_gre_enable ? 1 : 0);
1531 }
1532
1533 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
1534                                 struct ecore_ptt *p_ptt, u16 dest_port)
1535 {
1536         /* Update PRS register */
1537         ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
1538
1539         /* Update NIG register */
1540         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
1541
1542         /* Update PBF register */
1543         ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
1544 }
1545
1546 void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
1547                              struct ecore_ptt *p_ptt,
1548                              bool eth_geneve_enable, bool ip_geneve_enable)
1549 {
1550         u32 reg_val;
1551
1552         /* Update PRS register */
1553         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1554         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1555                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
1556                    eth_geneve_enable);
1557         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1558                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
1559                    ip_geneve_enable);
1560         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1561         if (reg_val) { /* TODO: handle E5 init */
1562                 reg_val = ecore_rd(p_hwfn, p_ptt,
1563                                    PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1564
1565                 /* Update output format only if tunnel blocks not included */
1566                 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1567                         ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1568                                  (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1569         }
1570
1571         /* Update NIG register */
1572         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
1573                  eth_geneve_enable ? 1 : 0);
1574         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
1575                  ip_geneve_enable ? 1 : 0);
1576
1577         /* EDPM with geneve tunnel not supported in BB */
1578         if (ECORE_IS_BB_B0(p_hwfn->p_dev))
1579                 return;
1580
1581         /* Update DORQ registers */
1582         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
1583                  eth_geneve_enable ? 1 : 0);
1584         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
1585                  ip_geneve_enable ? 1 : 0);
1586 }
1587
1588 #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET   4
1589 #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT      -927094512
1590
1591 void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
1592                                   struct ecore_ptt *p_ptt,
1593                                   bool enable)
1594 {
1595         u32 reg_val, cfg_mask;
1596
1597         /* read PRS config register */
1598         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
1599
1600         /* set VXLAN_NO_L2_ENABLE mask */
1601         cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
1602
1603         if (enable) {
1604                 /* set VXLAN_NO_L2_ENABLE flag */
1605                 reg_val |= cfg_mask;
1606
1607                 /* update PRS FIC register */
1608                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1609                  (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
1610         } else {
1611                 /* clear VXLAN_NO_L2_ENABLE flag */
1612                 reg_val &= ~cfg_mask;
1613         }
1614
1615         /* write PRS config register */
1616         ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
1617 }
1618
1619 #define T_ETH_PACKET_ACTION_GFT_EVENTID  23
1620 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
1621 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1622 #define PARSER_ETH_CONN_CM_HDR 0
1623 #define CAM_LINE_SIZE sizeof(u32)
1624 #define RAM_LINE_SIZE sizeof(u64)
1625 #define REG_SIZE sizeof(u32)
1626
1627 void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
1628                        struct ecore_ptt *p_ptt,
1629                        u16 pf_id)
1630 {
1631         /* disable gft search for PF */
1632         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1633
1634         /* Clean RAM & CAM for the next gft session */
1635
1636         /* Zero CAM line */
1637         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
1638
1639         /* Zero RAM line */
1640         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1641                                 RAM_LINE_SIZE * pf_id, 0);
1642         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1643                                 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
1644 }
1645
1646
1647 void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
1648                                    struct ecore_ptt *p_ptt)
1649 {
1650         u32 rfs_cm_hdr_event_id;
1651
1652         /* Set RFS event ID to be awakened in Tstorm by PRS */
1653         rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1654         rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
1655             PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1656         rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
1657             PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1658         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1659 }
1660
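/* Configure gft flow steering in PRS for the given PF: set the RFS CM header
 * and event ID, program the per-PF CAM line with the requested match profile
 * (TCP/UDP, IPv4/IPv6), program the RAM line with the per-profile field
 * mask, and finally enable the gft search.
 */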
1661 void ecore_gft_config(struct ecore_hwfn *p_hwfn,
1662                                struct ecore_ptt *p_ptt,
1663                                u16 pf_id,
1664                                bool tcp,
1665                                bool udp,
1666                                bool ipv4,
1667                                bool ipv6,
1668                                enum gft_profile_type profile_type)
1669 {
1670         u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
1671
1672         if (!ipv6 && !ipv4)
1673                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of ipv4 or ipv6\n");
1674         if (!tcp && !udp)
1675                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of udp or tcp\n");
1676         if (profile_type >= MAX_GFT_PROFILE_TYPE)
1677                 DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
1678
1679         /* Set RFS event ID to be awakened in Tstorm by PRS */
1680         reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
1681                   PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1682         reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1683         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
1684
1685         /* Do not load context, only the CID, in PRS on match. */
1686         ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1687
1688         /* Do not use the tenant ID exist bit for gft search */
1689         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
1690
1691         /* Set Cam */
1692         cam_line = 0;
1693         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
1694
1695         /* Filters are per PF!! */
1696         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
1697                   GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
1698         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
1699
1700         if (!(tcp && udp)) {
1701                 SET_FIELD(cam_line,
1702                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
1703                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
1704                 if (tcp)
1705                         SET_FIELD(cam_line,
1706                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1707                                   GFT_PROFILE_TCP_PROTOCOL);
1708                 else
1709                         SET_FIELD(cam_line,
1710                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1711                                   GFT_PROFILE_UDP_PROTOCOL);
1712         }
1713
1714         if (!(ipv4 && ipv6)) {
1715                 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
1716                 if (ipv4)
1717                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1718                                   GFT_PROFILE_IPV4);
1719                 else
1720                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1721                                   GFT_PROFILE_IPV6);
1722         }
1723
1724         /* Write characteristics to cam */
1725         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
1726                  cam_line);
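        /* Read the CAM line back (the value itself is not used) - presumably
         * to make sure the write has taken effect before continuing.
         */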
1727         cam_line = ecore_rd(p_hwfn, p_ptt,
1728                             PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
1729
1730         /* Write line to RAM - compare to filter 4 tuple */
1731         ram_line_lo = 0;
1732         ram_line_hi = 0;
1733
1734         /* Tunnel type */
1735         SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
1736         SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
1737
1738         if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
1739                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1740                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1741                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1742                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1743                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
1744                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1745         } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
1746                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1747                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1748                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1749         } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
1750                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1751                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1752         } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
1753                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1754                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1755         } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
1756                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
1757         }
1758
1759         ecore_wr(p_hwfn, p_ptt,
1760                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1761                  ram_line_lo);
1762         ecore_wr(p_hwfn, p_ptt,
1763                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
1764                  REG_SIZE, ram_line_hi);
1765
1766         /* Set default profile so that no filter match will happen */
1767         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1768                  PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
1769         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1770                  PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
1771
1772         /* Enable gft search */
1773         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
1774 }
1775
1776 /* Configure VF zone size mode */
1777 void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
1778                                     struct ecore_ptt *p_ptt, u16 mode,
1779                                     bool runtime_init)
1780 {
1781         u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
1782         u32 msdm_vf_offset_mask;
1783
1784         if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
1785                 msdm_vf_size_log += 1;
1786         else if (mode == VF_ZONE_SIZE_MODE_QUAD)
1787                 msdm_vf_size_log += 2;
1788
1789         msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
1790
1791         if (runtime_init) {
1792                 STORE_RT_REG(p_hwfn,
1793                              PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
1794                              msdm_vf_size_log);
1795                 STORE_RT_REG(p_hwfn,
1796                              PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
1797                              msdm_vf_offset_mask);
1798         } else {
1799                 ecore_wr(p_hwfn, p_ptt,
1800                          PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
1801                 ecore_wr(p_hwfn, p_ptt,
1802                          PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
1803         }
1804 }
1805
1806 /* Get mstorm statistics offset by VF zone size mode */
1807 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
1808                                        u16 stat_cnt_id,
1809                                        u16 vf_zone_size_mode)
1810 {
1811         u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1812
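        /* In double/quad zone size mode each VF zone grows by 1x/3x the
         * default zone size, so for VF stat counters (IDs beyond the PFs)
         * only that extra amount per counter is added on top of the
         * default-spaced base offset.
         */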
1813         if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
1814             (stat_cnt_id > MAX_NUM_PFS)) {
1815                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1816                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1817                             (stat_cnt_id - MAX_NUM_PFS);
1818                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1819                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1820                             (stat_cnt_id - MAX_NUM_PFS);
1821         }
1822
1823         return offset;
1824 }
1825
1826 /* Get mstorm VF producer offset by VF zone size mode */
1827 u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
1828                                          u8 vf_id,
1829                                          u8 vf_queue_id,
1830                                          u16 vf_zone_size_mode)
1831 {
1832         u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
1833
1834         if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
1835                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1836                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1837                                    vf_id;
1838                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1839                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1840                                   vf_id;
1841         }
1842
1843         return offset;
1844 }
1845
1846 #ifndef LINUX_REMOVE
1847 #define CRC8_INIT_VALUE 0xFF
1848 #endif
1849 static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
1850
1851 /* Calculate and return CDU validation byte per connection type / region /
1852  * cid
1853  */
1854 static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
1855 {
1856         const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
1857
1858         static u8 crc8_table_valid;     /* automatically initialized to 0 */
1859         u8 crc, validation_byte = 0;
1860         u32 validation_string = 0;
1861         u32 data_to_crc;
1862
1863         if (crc8_table_valid == 0) {
1864                 OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
1865                 crc8_table_valid = 1;
1866         }
1867
1868         /*
1869          * The CRC is calculated on the String-to-compress:
1870          * [31:8]  = {CID[31:20],CID[11:0]}
1871          * [7:4]   = Region
1872          * [3:0]   = Type
1873          */
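        /* Illustrative example (assuming the CID, region and type fields are
         * all enabled in validation_cfg): for cid = 0x12345678, region = 3
         * and conn_type = 2, the string-to-compress is 0x12367832.
         */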
1874         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
1875                 validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
1876
1877         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
1878                 validation_string |= ((region & 0xF) << 4);
1879
1880         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
1881                 validation_string |= (conn_type & 0xF);
1882
1883         /* Convert to big-endian and calculate CRC8 */
1884         data_to_crc = OSAL_BE32_TO_CPU(validation_string);
1885
1886         crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
1887                         CRC8_INIT_VALUE);
1888
1889         /* The validation byte [7:0] is composed:
1890          * for type A validation
1891          * [7]          = active configuration bit
1892          * [6:0]        = crc[6:0]
1893          *
1894          * for type B validation
1895          * [7]          = active configuration bit
1896          * [6:3]        = connection_type[3:0]
1897          * [2:0]        = crc[2:0]
1898          */
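        /* Illustrative example: with the active-configuration bit enabled,
         * type B validation, conn_type = 2 and crc = 0x5A, the result is
         * 0x80 | (0x2 << 3) | (0x5A & 0x7) = 0x92.
         */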
1899
1900         validation_byte |= ((validation_cfg >>
1901                              CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
1902
1903         if ((validation_cfg >>
1904              CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
1905                 validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
1906         else
1907                 validation_byte |= crc & 0x7F;
1908
1909         return validation_byte;
1910 }
1911
1912 /* Calculate and set validation bytes for session context */
1913 void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
1914                                        u8 ctx_type, u32 cid)
1915 {
1916         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1917
1918         p_ctx = (u8 *)p_ctx_mem;
1919         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1920         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1921         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1922
1923         OSAL_MEMSET(p_ctx, 0, ctx_size);
1924
1925         *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
1926         *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
1927         *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
1928 }
1929
1930 /* Calculate and set validation bytes for task context */
1931 void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
1932                                     u32 tid)
1933 {
1934         u8 *p_ctx, *region1_val_ptr;
1935
1936         p_ctx = (u8 *)p_ctx_mem;
1937         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1938
1939         OSAL_MEMSET(p_ctx, 0, ctx_size);
1940
1941         *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
1942 }
1943
1944 /* Memset session context to 0 while preserving validation bytes */
1945 void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1946 {
1947         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1948         u8 x_val, t_val, u_val;
1949
1950         p_ctx = (u8 *)p_ctx_mem;
1951         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1952         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1953         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1954
1955         x_val = *x_val_ptr;
1956         t_val = *t_val_ptr;
1957         u_val = *u_val_ptr;
1958
1959         OSAL_MEMSET(p_ctx, 0, ctx_size);
1960
1961         *x_val_ptr = x_val;
1962         *t_val_ptr = t_val;
1963         *u_val_ptr = u_val;
1964 }
1965
1966 /* Memset task context to 0 while preserving validation bytes */
1967 void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1968 {
1969         u8 *p_ctx, *region1_val_ptr;
1970         u8 region1_val;
1971
1972         p_ctx = (u8 *)p_ctx_mem;
1973         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1974
1975         region1_val = *region1_val_ptr;
1976
1977         OSAL_MEMSET(p_ctx, 0, ctx_size);
1978
1979         *region1_val_ptr = region1_val;
1980 }
1981
1982 /* Enable and configure context validation */
1983 void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
1984                                      struct ecore_ptt *p_ptt)
1985 {
1986         u32 ctx_validation;
1987
1988         /* Enable validation for connection region 3 - bits [31:24] */
1989         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
1990         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
1991
1992         /* Enable validation for connection region 5 - bits [15: 8] */
1993         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1994         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
1995
1996         /* Enable validation for connection region 1 - bits [15: 8] */
1997         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1998         ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
1999 }
2000
2001 #define RSS_IND_TABLE_BASE_ADDR       4112
2002 #define RSS_IND_TABLE_VPORT_SIZE      16
2003 #define RSS_IND_TABLE_ENTRY_PER_LINE  8
2004
2005 /* Update RSS indirection table entry. */
2006 void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
2007                                           struct ecore_ptt *p_ptt,
2008                                           u8 rss_id,
2009                                           u8 ind_table_index,
2010                                           u16 ind_table_value)
2011 {
2012         u32 cnt, rss_addr;
2013         u32 *reg_val;
2014         u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
2015         u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
2016
2017         /* get entry address */
2018         rss_addr =  RSS_IND_TABLE_BASE_ADDR +
2019                     RSS_IND_TABLE_VPORT_SIZE * rss_id +
2020                     ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
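        /* Illustrative example: for rss_id = 1 and ind_table_index = 10, the
         * RAM line address is 4112 + 16 * 1 + 10 / 8 = 4129, and the entry
         * is written at position 10 % 8 = 2 within that line.
         */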
2021
2022         /* prepare update command */
2023         ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
2024
2025         for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
2026                 if (cnt == ind_table_index) {
2027                         rss_ind_entry[cnt] = ind_table_value;
2028                         rss_ind_mask[cnt]  = 0xFFFF;
2029                 } else {
2030                         rss_ind_entry[cnt] = 0;
2031                         rss_ind_mask[cnt]  = 0;
2032                 }
2033         }
2034
2035         /* Update entry in HW */
2036         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
2037
2038         reg_val = (u32 *)rss_ind_mask;
2039         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
2040         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
2041         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
2042         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
2043
2044         reg_val = (u32 *)rss_ind_entry;
2045         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
2046         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
2047         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
2048         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
2049 }