net/qede/base: update formatting and comments
[dpdk.git] / drivers / net / qede / base / ecore_cxt.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_eth.h"
#include "ecore_rt_defs.h"
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_init_ops.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_hw.h"
#include "ecore_dev_api.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES          PROTOCOLID_COMMON
#define NUM_TASK_TYPES          2
#define NUM_TASK_PF_SEGMENTS    4
#define NUM_TASK_VF_SEGMENTS    1

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT  4
#define DQ_RANGE_ALIGN  (1 << DQ_RANGE_SHIFT)
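/* e.g. with DQ_RANGE_SHIFT = 4, CID counts passed to
 * ecore_cxt_set_proto_cid_count() are rounded up to multiples of 16.
 */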

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256

/* Timers constants */
#define TM_SHIFT        7
#define TM_ALIGN        (1 << TM_SHIFT)
#define TM_ELEM_SIZE    4

/* ILT constants */
/* If for some reason the HW P size is modified to be less than 32K,
 * special handling is needed for CDU initialization.
 */
#define ILT_DEFAULT_HW_P_SIZE   3

#define ILT_PAGE_IN_BYTES(hw_p_size)    (1U << ((hw_p_size) + 12))
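/* e.g. the default hw_p_size of 3 yields 1U << (3 + 12) = 32KB ILT pages */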
#define ILT_CFG_REG(cli, reg)           PSWRQ2_REG_##cli##_##reg##_RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK         0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT        0
#define ILT_ENTRY_VALID_MASK            0x1ULL
#define ILT_ENTRY_VALID_SHIFT           52
#define ILT_ENTRY_IN_REGS               2
#define ILT_REG_SIZE_IN_BYTES           4
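/* An ILT entry therefore spans 64 bits (2 registers of 4 bytes): bits [51:0]
 * hold the physical-address field and bit 52 is the valid bit.
 */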

/* connection context union */
union conn_context {
        struct core_conn_context core_ctx;
        struct eth_conn_context eth_ctx;
};

struct src_ent {
        u8 opaque[56];
        u64 next;
};

#define CDUT_SEG_ALIGNMET 3     /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
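/* i.e. CDUT segments are aligned to 1 << (3 + 12) = 32KB (eight 4K chunks) */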

#define CONN_CXT_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

/* PF per protocol configuration object */
#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)

struct ecore_tid_seg {
        u32 count;
        u8 type;
        bool has_fl_mem;
};

struct ecore_conn_type_cfg {
        u32 cid_count;
        u32 cid_start;
        u32 cids_per_vf;
        struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT Client configuration:
 * per connection type (protocol) resources (cids, tis, vf cids etc.).
 * One block is needed for the connection context (CDUC), and each task
 * context needs two blocks - one for the regular task context and one
 * for the force-load memory.
 */
#define ILT_CLI_PF_BLOCKS       (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK                (0)
#define CDUT_SEG_BLK(n)         (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)   (1 + (n) + NUM_TASK_##X##_SEGMENTS)
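/* Resulting PF layout with NUM_TASK_PF_SEGMENTS = 4: pf_blks[0] is the CDUC
 * connection-context block, pf_blks[1..4] the CDUT 'working' task segments
 * and pf_blks[5..8] their force-load counterparts. A VF has vf_blks[0] for
 * CDUC, vf_blks[1] for its single CDUT segment and vf_blks[2] for the
 * matching force-load block.
 */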

enum ilt_clients {
        ILT_CLI_CDUC,
        ILT_CLI_CDUT,
        ILT_CLI_QM,
        ILT_CLI_TM,
        ILT_CLI_SRC,
        ILT_CLI_MAX
};

struct ilt_cfg_pair {
        u32 reg;
        u32 val;
};

struct ecore_ilt_cli_blk {
        u32 total_size;         /* 0 means not active */
        u32 real_size_in_page;
        u32 start_line;
        u32 dynamic_line_cnt;
};

struct ecore_ilt_client_cfg {
        bool active;

        /* ILT boundaries */
        struct ilt_cfg_pair first;
        struct ilt_cfg_pair last;
        struct ilt_cfg_pair p_size;

        /* ILT client blocks for PF */
        struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
        u32 pf_total_lines;

        /* ILT client blocks for VFs */
        struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
        u32 vf_total_lines;
};

/* Per Path -
 *      ILT shadow table
 *      Protocol acquired CID lists
 *      PF start line in ILT
 */
struct ecore_dma_mem {
        dma_addr_t p_phys;
        void *p_virt;
        osal_size_t size;
};

#define MAP_WORD_SIZE           sizeof(unsigned long)
#define BITS_PER_MAP_WORD       (MAP_WORD_SIZE * 8)

struct ecore_cid_acquired_map {
        u32 start_cid;
        u32 max_count;
        unsigned long *cid_map;
};

struct ecore_cxt_mngr {
        /* Per protocol configuration */
        struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

        /* computed ILT structure */
        struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];

        /* Task type sizes */
        u32 task_type_size[NUM_TASK_TYPES];

        /* total number of VFs for this hwfn -
         * ALL VFs are symmetric in terms of HW resources
         */
        u32 vf_count;

        /* Acquired CIDs */
        struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];

        /* ILT shadow table */
        struct ecore_dma_mem *ilt_shadow;
        u32 pf_start_line;

        /* SRC T2 */
        struct ecore_dma_mem *t2;
        u32 t2_num_pages;
        u64 first_free;
        u64 last_free;
};

/* check if resources/configuration is required according to protocol type */
static OSAL_INLINE bool src_proto(enum protocol_type type)
{
        return type == PROTOCOLID_TOE;
}

static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
{
        return type == PROTOCOLID_TOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct ecore_cdu_iids {
        u32 pf_cids;
        u32 per_vf_cids;
};

static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
                               struct ecore_cdu_iids *iids)
{
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
                iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
        }
}

/* counts the iids for the Searcher block configuration */
struct ecore_src_iids {
        u32 pf_cids;
        u32 per_vf_cids;
};

static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
                                           struct ecore_src_iids *iids)
{
        u32 i;

        for (i = 0; i < MAX_CONN_TYPES; i++) {
                if (!src_proto(i))
                        continue;

                iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
                iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
        }
}

/* counts the iids for the Timers block configuration */
struct ecore_tm_iids {
        u32 pf_cids;
        u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
        u32 pf_tids_total;
        u32 per_vf_cids;
        u32 per_vf_tids;
};

static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
                                          struct ecore_tm_iids *iids)
{
        u32 i, j;

        for (i = 0; i < MAX_CONN_TYPES; i++) {
                struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

                if (tm_cid_proto(i)) {
                        iids->pf_cids += p_cfg->cid_count;
                        iids->per_vf_cids += p_cfg->cids_per_vf;
                }
        }

        iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
        iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
        iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
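        /* e.g. with TM_ALIGN = 1 << 7 = 128, a count of 100 rounds up to
         * 128 timer elements.
         */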

        for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
                iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
                iids->pf_tids_total += iids->pf_tids[j];
        }
}

void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct ecore_tid_seg *segs;
        u32 vf_cids = 0, type, j;
        u32 vf_tids = 0;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                iids->cids += p_mngr->conn_cfg[type].cid_count;
                vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

                segs = p_mngr->conn_cfg[type].tid_seg;
                /* for each segment there is at most one
                 * protocol for which count is not 0.
                 */
                for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
                        iids->tids += segs[j].count;

                /* The last array element is for the VFs. As for PF
                 * segments there can be only one protocol for
                 * which this value is not 0.
                 */
                vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
        }

        iids->vf_cids += vf_cids * p_mngr->vf_count;
        iids->tids += vf_tids * p_mngr->vf_count;

        DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
                   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
                   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
                                                    u32 seg)
{
        struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
        u32 i;

        /* Find the protocol with tid count > 0 for this segment.
         * Note: there can only be one and this is already validated.
         */
        for (i = 0; i < MAX_CONN_TYPES; i++) {
                if (p_cfg->conn_cfg[i].tid_seg[seg].count)
                        return &p_cfg->conn_cfg[i].tid_seg[seg];
        }
        return OSAL_NULL;
}

/* set the iids (cid/tid) count per protocol */
void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
                                   enum protocol_type type,
                                   u32 cid_count, u32 vf_cid_cnt)
{
        struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
        struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

        p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
        p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
}

u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
                                  enum protocol_type type, u32 *vf_cid)
{
        if (vf_cid)
                *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

        return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
                                  enum protocol_type type)
{
        return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

static u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
                                         enum protocol_type type)
{
        u32 cnt = 0;
        int i;

        for (i = 0; i < TASK_SEGMENTS; i++)
                cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

        return cnt;
}

static OSAL_INLINE void
ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
                              enum protocol_type proto,
                              u8 seg, u8 seg_type, u32 count, bool has_fl)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

        p_seg->count = count;
        p_seg->has_fl_mem = has_fl;
        p_seg->type = seg_type;
}

/* the *p_line parameter must be either 0 for the first invocation or the
 * value returned in the previous invocation.
 */
static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
                                   struct ecore_ilt_cli_blk *p_blk,
                                   u32 start_line,
                                   u32 total_size, u32 elem_size)
{
        u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

        /* verify that it's called once for each block */
        if (p_blk->total_size)
                return;

        p_blk->total_size = total_size;
        p_blk->real_size_in_page = 0;
        if (elem_size)
                p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
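        /* e.g. a 32KB ILT page holding (illustrative) 320B elements:
         * real_size_in_page = (32768 / 320) * 320 = 32640, i.e. 102 whole
         * elements per page with 128B of the page left unused.
         */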
        p_blk->start_line = start_line;
}

static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ilt_client_cfg *p_cli,
                                   struct ecore_ilt_cli_blk *p_blk,
                                   u32 *p_line, enum ilt_clients client_id)
{
        if (!p_blk->total_size)
                return;

        if (!p_cli->active)
                p_cli->first.val = *p_line;

        p_cli->active = true;
        *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
        p_cli->last.val = *p_line - 1;

        DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
                   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
                   " [Real %08x] Start line %d\n",
                   client_id, p_cli->first.val, p_cli->last.val,
                   p_blk->total_size, p_blk->real_size_in_page,
                   p_blk->start_line);
}

static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
                                          enum ilt_clients ilt_client)
{
        u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
        struct ecore_ilt_client_cfg *p_cli;
        u32 lines_to_skip = 0;
        u32 cxts_per_p;

        /* TBD MK: ILT code should be simplified once PROTO enum is changed */

        if (ilt_client == ILT_CLI_CDUC) {
                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

                cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
                    (u32)CONN_CXT_SIZE(p_hwfn);

                lines_to_skip = cid_count / cxts_per_p;
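                /* e.g. with 32KB pages and (illustrative) 512B connection
                 * contexts, cxts_per_p = 64, so 10000 RoCE cids would skip
                 * 10000 / 64 = 156 statically allocated lines.
                 */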
        }

        return lines_to_skip;
}

enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 curr_line, total, i, task_size, line;
        struct ecore_ilt_client_cfg *p_cli;
        struct ecore_ilt_cli_blk *p_blk;
        struct ecore_cdu_iids cdu_iids;
        struct ecore_src_iids src_iids;
        struct ecore_qm_iids qm_iids;
        struct ecore_tm_iids tm_iids;
        struct ecore_tid_seg *p_seg;

        OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
        OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
        OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
        OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));

        p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);

        DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
                   "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

        /* CDUC */
        p_cli = &p_mngr->clients[ILT_CLI_CDUC];

        curr_line = p_mngr->pf_start_line;

        /* CDUC PF */
        p_cli->pf_total_lines = 0;

        /* get the counters for the CDUC, CDUT and QM clients */
        ecore_cxt_cdu_iids(p_mngr, &cdu_iids);

        p_blk = &p_cli->pf_blks[CDUC_BLK];

        total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

        ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                               total, CONN_CXT_SIZE(p_hwfn));

        ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
                                                                 ILT_CLI_CDUC);

        /* CDUC VF */
        p_blk = &p_cli->vf_blks[CDUC_BLK];
        total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

        ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                               total, CONN_CXT_SIZE(p_hwfn));

        ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
        p_cli->vf_total_lines = curr_line - p_blk->start_line;

        for (i = 1; i < p_mngr->vf_count; i++)
                ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                       ILT_CLI_CDUC);

        /* CDUT PF */
        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
        p_cli->first.val = curr_line;

        /* first the 'working' task memory */
        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
                ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
                                       p_mngr->task_type_size[p_seg->type]);

                ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                       ILT_CLI_CDUT);
        }

        /* next the 'init' task memory (forced load memory) */
        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];

                if (!p_seg->has_fl_mem) {
                        /* The segment is active (total size of 'working'
                         * memory is > 0) but has no FL (forced-load, Init)
                         * memory. Thus:
                         *
                         * 1.   The total-size in the corresponding FL block
                         *      of the ILT client is set to 0 - no ILT lines
                         *      are provisioned and no ILT memory allocated.
                         *
                         * 2.   The start-line of said block is set to the
                         *      start line of the matching working memory
                         *      block in the ILT client. This is later used to
                         *      configure the CDU segment offset registers, so
                         *      an FL command for TIDs of this segment behaves
                         *      like a regular load command (loading TIDs from
                         *      the working memory).
                         */
                        line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

                        ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
                        continue;
                }
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];

                ecore_ilt_cli_blk_fill(p_cli, p_blk,
                                       curr_line, total,
                                       p_mngr->task_type_size[p_seg->type]);

                ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                       ILT_CLI_CDUT);
        }
        p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

        /* CDUT VF */
        p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
        if (p_seg && p_seg->count) {
                /* Strictly speaking we need to iterate over all VF
                 * task segment types, but a VF has only 1 segment
                 */

                /* 'working' memory */
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];

                p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
                ecore_ilt_cli_blk_fill(p_cli, p_blk,
                                       curr_line, total,
                                       p_mngr->task_type_size[p_seg->type]);

                ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                       ILT_CLI_CDUT);

                /* 'init' memory */
                p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
                if (!p_seg->has_fl_mem) {
                        /* see comment above */
                        line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
                        ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
                } else {
                        task_size = p_mngr->task_type_size[p_seg->type];
                        ecore_ilt_cli_blk_fill(p_cli, p_blk,
                                               curr_line, total, task_size);
                        ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                               ILT_CLI_CDUT);
                }
                p_cli->vf_total_lines = curr_line -
                    p_cli->vf_blks[0].start_line;

                /* Now for the rest of the VFs */
                for (i = 1; i < p_mngr->vf_count; i++) {
                        p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
                        ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                               ILT_CLI_CDUT);

                        p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
                        ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                               ILT_CLI_CDUT);
                }
        }

        /* QM */
        p_cli = &p_mngr->clients[ILT_CLI_QM];
        p_blk = &p_cli->pf_blks[0];

        ecore_cxt_qm_iids(p_hwfn, &qm_iids);
        total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
                                     qm_iids.vf_cids, qm_iids.tids,
                                     p_hwfn->qm_info.num_pqs,
                                     p_hwfn->qm_info.num_vf_pqs);

        DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
                   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
                   " num_vf_pqs=%d, memory_size=%d)\n",
                   qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
                   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

        ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
                               QM_PQ_ELEMENT_SIZE);

        ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        /* SRC */
        p_cli = &p_mngr->clients[ILT_CLI_SRC];
        ecore_cxt_src_iids(p_mngr, &src_iids);

        /* Both the PF and VFs searcher connections are stored in the per PF
         * database. Thus sum the PF searcher cids and all the VFs searcher
         * cids.
         */
        total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
        if (total) {
                u32 local_max = OSAL_MAX_T(u32, total,
                                           SRC_MIN_NUM_ELEMS);

                total = OSAL_ROUNDUP_POW_OF_TWO(local_max);

                p_blk = &p_cli->pf_blks[0];
                ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                       total * sizeof(struct src_ent),
                                       sizeof(struct src_ent));

                ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                       ILT_CLI_SRC);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        /* TM PF */
        p_cli = &p_mngr->clients[ILT_CLI_TM];
        ecore_cxt_tm_iids(p_mngr, &tm_iids);
        total = tm_iids.pf_cids + tm_iids.pf_tids_total;
        if (total) {
                p_blk = &p_cli->pf_blks[0];
                ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                       total * TM_ELEM_SIZE, TM_ELEM_SIZE);

                ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                       ILT_CLI_TM);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        /* TM VF */
        total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
        if (total) {
                p_blk = &p_cli->vf_blks[0];
                ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                       total * TM_ELEM_SIZE, TM_ELEM_SIZE);

                ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                       ILT_CLI_TM);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;

                for (i = 1; i < p_mngr->vf_count; i++) {
                        ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                               ILT_CLI_TM);
                }
        }

        if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
            RESC_NUM(p_hwfn, ECORE_ILT)) {
                DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
                       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 i;

        if (!p_mngr->t2)
                return;

        for (i = 0; i < p_mngr->t2_num_pages; i++)
                if (p_mngr->t2[i].p_virt)
                        OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                               p_mngr->t2[i].p_virt,
                                               p_mngr->t2[i].p_phys,
                                               p_mngr->t2[i].size);

        OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
        p_mngr->t2 = OSAL_NULL;
}

static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 conn_num, total_size, ent_per_page, psz, i;
        struct ecore_ilt_client_cfg *p_src;
        struct ecore_src_iids src_iids;
        struct ecore_dma_mem *p_t2;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));

        /* if the SRC ILT client is inactive - there are no connections
         * requiring the searcher, leave.
         */
        p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
        if (!p_src->active)
                return ECORE_SUCCESS;

        ecore_cxt_src_iids(p_mngr, &src_iids);
        conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
        total_size = conn_num * sizeof(struct src_ent);

        /* use the same page size as the SRC ILT client */
        psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
        p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);

        /* allocate t2 */
        p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                 p_mngr->t2_num_pages *
                                 sizeof(struct ecore_dma_mem));
        if (!p_mngr->t2) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
                rc = ECORE_NOMEM;
                goto t2_fail;
        }

        /* allocate t2 pages */
        for (i = 0; i < p_mngr->t2_num_pages; i++) {
                u32 size = OSAL_MIN_T(u32, total_size, psz);
                void **p_virt = &p_mngr->t2[i].p_virt;

                *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
                                                  &p_mngr->t2[i].p_phys, size);
                if (!p_mngr->t2[i].p_virt) {
                        rc = ECORE_NOMEM;
                        goto t2_fail;
                }
                OSAL_MEM_ZERO(*p_virt, size);
                p_mngr->t2[i].size = size;
                total_size -= size;
        }

        /* Set the t2 pointers */

        /* entries per page - must be a power of two */
        ent_per_page = psz / sizeof(struct src_ent);
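        /* sizeof(struct src_ent) is 64B (56B opaque + 8B next), so 32KB
         * pages hold 512 entries - a power of two, as required by the
         * (ent_per_page - 1) mask arithmetic below.
         */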

        p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;

        p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
        p_mngr->last_free = (u64)p_t2->p_phys +
            ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);

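        /* String all T2 entries into a single free list: within a page each
         * entry's 'next' holds the big-endian physical address of the entry
         * that follows it, the last entry of a page points at the first
         * entry of the next page, and the final entry is terminated with 0.
         */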
        for (i = 0; i < p_mngr->t2_num_pages; i++) {
                u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
                struct src_ent *entries = p_mngr->t2[i].p_virt;
                u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
                u32 j;

                for (j = 0; j < ent_num - 1; j++) {
                        val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
                        entries[j].next = OSAL_CPU_TO_BE64(val);
                }

                if (i < p_mngr->t2_num_pages - 1)
                        val = (u64)p_mngr->t2[i + 1].p_phys;
                else
                        val = 0;
                entries[j].next = OSAL_CPU_TO_BE64(val);

                conn_num -= ent_per_page;
        }

        return ECORE_SUCCESS;

t2_fail:
        ecore_cxt_src_t2_free(p_hwfn);
        return rc;
}

#define for_each_ilt_valid_client(pos, clients)         \
        for (pos = 0; pos < ILT_CLI_MAX; pos++)         \
                if (!clients[pos].active) {             \
                        continue;                       \
                } else                                  \

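/* The dangling 'else' above deliberately swallows the statement following the
 * macro invocation, so the caller's loop body runs only for active clients
 * while the call site still reads like a plain for-loop.
 */
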
/* Total number of ILT lines used by this PF */
static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
{
        u32 size = 0;
        u32 i;

        for_each_ilt_valid_client(i, ilt_clients)
                size += (ilt_clients[i].last.val -
                         ilt_clients[i].first.val + 1);

        return size;
}

static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 ilt_size, i;

        ilt_size = ecore_cxt_ilt_shadow_size(p_cli);

        for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
                struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

                if (p_dma->p_virt)
                        OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                               p_dma->p_virt,
                                               p_dma->p_phys, p_dma->size);
                p_dma->p_virt = OSAL_NULL;
        }
        OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
}

static enum _ecore_status_t
ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
                    struct ecore_ilt_cli_blk *p_blk,
                    enum ilt_clients ilt_client, u32 start_line_offset)
{
        struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
        u32 lines, line, sz_left, lines_to_skip = 0;

        /* Special handling for RoCE that supports dynamic allocation */
        if (ilt_client == ILT_CLI_CDUT)
                return ECORE_SUCCESS;

        lines_to_skip = p_blk->dynamic_line_cnt;

        if (!p_blk->total_size)
                return ECORE_SUCCESS;

        sz_left = p_blk->total_size;
        lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
        line = p_blk->start_line + start_line_offset -
            p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

        for (; lines; lines--) {
                dma_addr_t p_phys;
                void *p_virt;
                u32 size;

                size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);

/* @DPDK */
#define ILT_BLOCK_ALIGN_SIZE 0x1000
                p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
                                                         &p_phys, size,
                                                         ILT_BLOCK_ALIGN_SIZE);
                if (!p_virt)
                        return ECORE_NOMEM;
                OSAL_MEM_ZERO(p_virt, size);

                ilt_shadow[line].p_phys = p_phys;
                ilt_shadow[line].p_virt = p_virt;
                ilt_shadow[line].size = size;

                DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
                           "ILT shadow: Line [%d] Physical 0x%lx"
                           " Virtual %p Size %d\n",
                           line, (unsigned long)p_phys, p_virt, size);

                sz_left -= size;
                line++;
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct ecore_ilt_client_cfg *clients = p_mngr->clients;
        struct ecore_ilt_cli_blk *p_blk;
        u32 size, i, j, k;
        enum _ecore_status_t rc;

        size = ecore_cxt_ilt_shadow_size(clients);
        p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                         size * sizeof(struct ecore_dma_mem));

        if (!p_mngr->ilt_shadow) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate ilt shadow table\n");
                rc = ECORE_NOMEM;
                goto ilt_shadow_fail;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
                   "Allocated 0x%x bytes for ilt shadow\n",
                   (u32)(size * sizeof(struct ecore_dma_mem)));

        for_each_ilt_valid_client(i, clients) {
                for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
                        p_blk = &clients[i].pf_blks[j];
                        rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
                        if (rc != ECORE_SUCCESS)
                                goto ilt_shadow_fail;
                }
                for (k = 0; k < p_mngr->vf_count; k++) {
                        for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
                                u32 lines = clients[i].vf_total_lines * k;

                                p_blk = &clients[i].vf_blks[j];
                                rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
                                                         i, lines);
                                if (rc != ECORE_SUCCESS)
                                        goto ilt_shadow_fail;
                        }
                }
        }

        return ECORE_SUCCESS;

ilt_shadow_fail:
        ecore_ilt_shadow_free(p_hwfn);
        return rc;
}

static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
                p_mngr->acquired[type].max_count = 0;
                p_mngr->acquired[type].start_cid = 0;
        }
}

static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 start_cid = 0;
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
                u32 size;

                if (cid_cnt == 0)
                        continue;

                size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
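                /* e.g. 1000 cids on a 64-bit host (8B map words):
                 * DIV_ROUND_UP(1000, 64) = 16 words, i.e. a 128B bitmap.
                 */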
                p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev,
                                                             GFP_KERNEL, size);
                if (!p_mngr->acquired[type].cid_map)
                        goto cid_map_fail;

                p_mngr->acquired[type].max_count = cid_cnt;
                p_mngr->acquired[type].start_cid = start_cid;

                p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;

                DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
                           "Type %08x start: %08x count %08x\n",
                           type, p_mngr->acquired[type].start_cid,
                           p_mngr->acquired[type].max_count);
                start_cid += cid_cnt;
        }

        return ECORE_SUCCESS;

cid_map_fail:
        ecore_cid_map_free(p_hwfn);
        return ECORE_NOMEM;
}

enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr;
        u32 i;

        p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
        if (!p_mngr) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_cxt_mngr'\n");
                return ECORE_NOMEM;
        }

        /* Initialize ILT client registers */
        p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
        p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
        p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

        p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
        p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
        p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

        p_mngr->clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
        p_mngr->clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
        p_mngr->clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

        p_mngr->clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
        p_mngr->clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
        p_mngr->clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

        p_mngr->clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
        p_mngr->clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
        p_mngr->clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

        /* default ILT page size for all clients is 32K */
        for (i = 0; i < ILT_CLI_MAX; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

        /* Initialize task sizes */
        p_mngr->task_type_size[0] = 512; /* @DPDK */
        p_mngr->task_type_size[1] = 128; /* @DPDK */

        p_mngr->vf_count = p_hwfn->p_dev->sriov_info.total_vfs;
        /* Set the cxt manager pointer prior to further allocations */
        p_hwfn->p_cxt_mngr = p_mngr;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
{
        enum _ecore_status_t rc;

        /* Allocate the ILT shadow table */
        rc = ecore_ilt_shadow_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
                goto tables_alloc_fail;
        }

        /* Allocate the T2 table */
        rc = ecore_cxt_src_t2_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
                goto tables_alloc_fail;
        }

        /* Allocate and initialize the acquired cids bitmaps */
        rc = ecore_cid_map_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
                goto tables_alloc_fail;
        }

        return ECORE_SUCCESS;

tables_alloc_fail:
        ecore_cxt_mngr_free(p_hwfn);
        return rc;
}

void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_cxt_mngr)
                return;

        ecore_cid_map_free(p_hwfn);
        ecore_cxt_src_t2_free(p_hwfn);
        ecore_ilt_shadow_free(p_hwfn);
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);

        p_hwfn->p_cxt_mngr = OSAL_NULL;
}

void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        int type;

        /* Reset acquired cids */
        for (type = 0; type < MAX_CONN_TYPES; type++) {
                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
                u32 i;

                if (cid_cnt == 0)
                        continue;

                for (i = 0; i < DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); i++)
                        p_mngr->acquired[type].cid_map[i] = 0;
        }
}

/* HW initialization helper (per Block, per phase) */

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT                                             \
        CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK                                              \
        (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT                                          \
        CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK                                           \
        (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT                                                 \
        CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK                                                  \
        (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT                                       \
        CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK                                        \
        (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >>                         \
        CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT                                    \
        CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK                                     \
        (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >>                  \
        CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT                                           \
        CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK                                            \
        (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >>                \
        CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT                                       \
        CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK                                        \
        (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >>                         \
        CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT                                    \
        CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK                                     \
        (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >>                  \
        CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT                                           \
        CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK                                            \
        (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >>                \
        CDUT_TYPE1_NCIB_SHIFT)

static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
{
        u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

        /* CDUC - connection configuration */
        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
        cxt_size = CONN_CXT_SIZE(p_hwfn);
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

        SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
        SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
        SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
        STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

        /* CDUT - type-0 tasks configuration */
        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

        /* cxt size and block-waste are multiples of 8 */
        cdu_params = 0;
        SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
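        /* e.g. task_type_size[0] = 512 with 32KB pages: elems_per_page = 64,
         * block_waste = 0, and the fields are written as 512 >> 3 = 64 and 0.
         */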

        /* CDUT - type-1 tasks configuration */
        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

        /* cxt size and block-waste are multiples of 8 */
        cdu_params = 0;
        SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}

/* CDU PF */
#define CDU_SEG_REG_TYPE_SHIFT          CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
#define CDU_SEG_REG_TYPE_MASK           0x1
#define CDU_SEG_REG_OFFSET_SHIFT        0
#define CDU_SEG_REG_OFFSET_MASK         CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK

static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ilt_client_cfg *p_cli;
        struct ecore_tid_seg *p_seg;
        u32 cdu_seg_params, offset;
        int i;

        static const u32 rt_type_offset_arr[] = {
                CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
                CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
                CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
                CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
        };

        static const u32 rt_type_offset_fl_arr[] = {
                CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
                CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
                CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
                CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
        };

        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];

        /* There are initializations only for CDUT during the PF phase */
        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                /* Segment 0 */
                p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg)
                        continue;

                /* Note: start_line is already adjusted for the CDU
                 * segment register granularity, so we just need to
                 * divide. Adjustment is implicit as we assume ILT
                 * page size is larger than 32K!
                 */
                offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
                          (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
                           p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

                cdu_seg_params = 0;
                SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
                SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
                STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);

                offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
                          (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
                           p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

                cdu_seg_params = 0;
                SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
                SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
                STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
        }
}

void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
{
        struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
        struct ecore_qm_iids iids;

        OSAL_MEM_ZERO(&iids, sizeof(iids));
        ecore_cxt_qm_iids(p_hwfn, &iids);

        ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
                            p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
                            p_hwfn->first_on_engine,
                            iids.cids, iids.vf_cids, iids.tids,
                            qm_info->start_pq,
                            qm_info->num_pqs - qm_info->num_vf_pqs,
                            qm_info->num_vf_pqs,
                            qm_info->start_vport,
                            qm_info->num_vports, qm_info->pf_wfq,
                            qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
                            p_hwfn->qm_info.qm_vport_params);
}

/* CM PF */
static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
        union ecore_qm_pq_params pq_params;
        u16 pq;

        /* XCM pure-LB queue */
        OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);

        return ECORE_SUCCESS;
}

/* DQ PF */
static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
{
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;

        dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

        dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

        dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

        dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

        dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

        dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);

        dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);

        /* Connection types 6 & 7 are not in use, yet they must be configured
         * as the highest possible connection. Not configuring them means the
         * defaults will be used, and with a large number of cids a bug may
         * occur if the defaults are smaller than dq_pf_max_cid /
         * dq_vf_max_cid.
         */
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);

        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}

static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ilt_client_cfg *ilt_clients;
        int i;

        ilt_clients = p_hwfn->p_cxt_mngr->clients;
        for_each_ilt_valid_client(i, ilt_clients) {
                STORE_RT_REG(p_hwfn,
                             ilt_clients[i].first.reg,
                             ilt_clients[i].first.val);
                STORE_RT_REG(p_hwfn,
                             ilt_clients[i].last.reg, ilt_clients[i].last.val);
                STORE_RT_REG(p_hwfn,
                             ilt_clients[i].p_size.reg,
                             ilt_clients[i].p_size.val);
        }
}

static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ilt_client_cfg *p_cli;
        u32 blk_factor;

1385         /* For simplicity we set the 'block' to be an ILT page */
1386         STORE_RT_REG(p_hwfn,
1387                      PSWRQ2_REG_VF_BASE_RT_OFFSET,
1388                      p_hwfn->hw_info.first_vf_in_pf);
1389         STORE_RT_REG(p_hwfn,
1390                      PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1391                      p_hwfn->hw_info.first_vf_in_pf +
1392                      p_hwfn->p_dev->sriov_info.total_vfs);
1393
1394         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1395         blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1396         if (p_cli->active) {
1397                 STORE_RT_REG(p_hwfn,
1398                              PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1399                              blk_factor);
1400                 STORE_RT_REG(p_hwfn,
1401                              PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1402                              p_cli->pf_total_lines);
1403                 STORE_RT_REG(p_hwfn,
1404                              PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1405                              p_cli->vf_total_lines);
1406         }
1407
1408         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1409         blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1410         if (p_cli->active) {
1411                 STORE_RT_REG(p_hwfn,
1412                              PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1413                              blk_factor);
1414                 STORE_RT_REG(p_hwfn,
1415                              PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1416                              p_cli->pf_total_lines);
1417                 STORE_RT_REG(p_hwfn,
1418                              PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1419                              p_cli->vf_total_lines);
1420         }
1421
1422         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1423         blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1424         if (p_cli->active) {
1425                 STORE_RT_REG(p_hwfn,
1426                              PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1427                 STORE_RT_REG(p_hwfn,
1428                              PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1429                              p_cli->pf_total_lines);
1430                 STORE_RT_REG(p_hwfn,
1431                              PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1432                              p_cli->vf_total_lines);
1433         }
1434 }
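
/* Illustrative arithmetic for the blk_factor computations above, assuming
 * the default ILT_DEFAULT_HW_P_SIZE of 3: ILT_PAGE_IN_BYTES(3) is
 * 1 << (3 + 12) = 32K, 32K >> 10 gives 32 1K-blocks per page, and so
 * blk_factor = OSAL_LOG2(32) = 5.
 */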
1435
1436 /* ILT (PSWRQ2) PF */
1437 static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
1438 {
1439         struct ecore_ilt_client_cfg *clients;
1440         struct ecore_cxt_mngr *p_mngr;
1441         struct ecore_dma_mem *p_shdw;
1442         u32 line, rt_offst, i;
1443
1444         ecore_ilt_bounds_init(p_hwfn);
1445         ecore_ilt_vf_bounds_init(p_hwfn);
1446
1447         p_mngr = p_hwfn->p_cxt_mngr;
1448         p_shdw = p_mngr->ilt_shadow;
1449         clients = p_hwfn->p_cxt_mngr->clients;
1450
1451         for_each_ilt_valid_client(i, clients) {
1452                 /* The client's first val and the RT array are absolute,
1453                  * while the ILT shadow's lines are relative.
1454                  */
1455                 line = clients[i].first.val - p_mngr->pf_start_line;
1456                 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1457                     clients[i].first.val * ILT_ENTRY_IN_REGS;
1458
1459                 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1460                      line++, rt_offst += ILT_ENTRY_IN_REGS) {
1461                         u64 ilt_hw_entry = 0;
1462
1463                         /* p_virt could be OSAL_NULL in case of dynamic
1464                          * allocation
1465                          */
1466                         if (p_shdw[line].p_virt != OSAL_NULL) {
1467                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1468                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1469                                           (p_shdw[line].p_phys >> 12));
1470
1471                                 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
1472                                         "Setting RT[0x%08x] from"
1473                                         " ILT[0x%08x] [Client is %d] to"
1474                                         " Physical addr: 0x%lx\n",
1475                                         rt_offst, line, i,
1476                                         (unsigned long)
1477                                         (p_shdw[line].p_phys >> 12));
1478                         }
1479
1480                         STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1481                 }
1482         }
1483 }
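
/* A minimal sketch of the ILT entry packing done above, with a hypothetical
 * DMA address; SET_FIELD() shifts each value into its field position:
 *
 *      u64 entry = 0;
 *      u64 phys = 0x12345000ULL;       // hypothetical page-aligned address
 *
 *      SET_FIELD(entry, ILT_ENTRY_PHY_ADDR, phys >> 12); // bits 0..51
 *      SET_FIELD(entry, ILT_ENTRY_VALID, 1ULL);          // bit 52
 *      // entry == (1ULL << 52) | 0x12345
 */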
1484
1485 /* SRC (Searcher) PF */
1486 static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
1487 {
1488         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1489         u32 rounded_conn_num, conn_num, conn_max;
1490         struct ecore_src_iids src_iids;
1491
1492         OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
1493         ecore_cxt_src_iids(p_mngr, &src_iids);
1494         conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1495         if (!conn_num)
1496                 return;
1497
1498         conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS);
1499         rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max);
1500
1501         STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1502         STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1503                      OSAL_LOG2(rounded_conn_num));
1504
1505         STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1506                          p_hwfn->p_cxt_mngr->first_free);
1507         STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1508                          p_hwfn->p_cxt_mngr->last_free);
1509 }
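
/* Illustrative arithmetic (hypothetical iid counts): for conn_num = 300,
 * conn_max = max(300, SRC_MIN_NUM_ELEMS = 256) = 300, which rounds up to
 * the next power of two, 512, so the searcher is programmed with
 * OSAL_LOG2(512) = 9 hash bits.
 */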
1510
1511 /* Timers PF */
1512 #define TM_CFG_NUM_IDS_SHIFT            0
1513 #define TM_CFG_NUM_IDS_MASK             0xFFFFULL
1514 #define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
1515 #define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
1516 #define TM_CFG_PARENT_PF_SHIFT          25
1517 #define TM_CFG_PARENT_PF_MASK           0x7ULL
1518
1519 #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
1520 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
1521
1522 #define TM_CFG_TID_OFFSET_SHIFT         30
1523 #define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
1524 #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
1525 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
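
/* Bit layout of the 64-bit timers cfg_word assembled from the masks above:
 * bits 0..15 NUM_IDS, bits 16..24 PRE_SCAN_OFFSET, bits 25..27 PARENT_PF;
 * connection words then carry CID_PRE_SCAN_ROWS in bits 30..38, while task
 * words carry TID_OFFSET in bits 30..48 and TID_PRE_SCAN_ROWS in
 * bits 49..57.
 */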
1526
1527 static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
1528 {
1529         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1530         u32 active_seg_mask = 0, tm_offset, rt_reg;
1531         struct ecore_tm_iids tm_iids;
1532         u64 cfg_word;
1533         u8 i;
1534
1535         OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
1536         ecore_cxt_tm_iids(p_mngr, &tm_iids);
1537
1538         /* @@@TBD No pre-scan for now */
1539
1540         /* Note: We assume consecutive VFs for a PF */
1541         for (i = 0; i < p_mngr->vf_count; i++) {
1542                 cfg_word = 0;
1543                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1544                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1545                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1546                 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1547
1548                 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1549                     (sizeof(cfg_word) / sizeof(u32)) *
1550                     (p_hwfn->hw_info.first_vf_in_pf + i);
1551                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1552         }
1553
1554         cfg_word = 0;
1555         SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1556         SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1557         SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
1558         SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all   */
1559
1560         rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1561             (sizeof(cfg_word) / sizeof(u32)) *
1562             (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
1563         STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1564
1565         /* enable scan */
1566         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1567                      tm_iids.pf_cids ? 0x1 : 0x0);
1568
1569         /* @@@TBD how to enable the scan for the VFs */
1570
1571         tm_offset = tm_iids.per_vf_cids;
1572
1573         /* Note: We assume consecutive VFs for a PF */
1574         for (i = 0; i < p_mngr->vf_count; i++) {
1575                 cfg_word = 0;
1576                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1577                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1578                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1579                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1580                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
1581
1582                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1583                     (sizeof(cfg_word) / sizeof(u32)) *
1584                     (p_hwfn->hw_info.first_vf_in_pf + i);
1585
1586                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1587         }
1588
1589         tm_offset = tm_iids.pf_cids;
1590         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1591                 cfg_word = 0;
1592                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1593                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1594                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1595                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1596                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
1597
1598                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1599                     (sizeof(cfg_word) / sizeof(u32)) *
1600                     (NUM_OF_VFS(p_hwfn->p_dev) +
1601                      p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1602
1603                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1604                 active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
1605
1606                 tm_offset += tm_iids.pf_tids[i];
1607         }
1608
1609         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1610
1611         /* @@@TBD how to enable the scan for the VFs */
1612 }
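
/* Illustrative indexing for the rt_reg computations above: each cfg_word
 * is 64 bits wide, i.e. sizeof(cfg_word) / sizeof(u32) = 2 runtime
 * registers per entry. The connection array holds VF entries first
 * (indexed by absolute VF id) followed by PF entries, so a hypothetical PF
 * with rel_pf_id = 1 on a device with 16 VFs writes its connection word at
 * TM_REG_CONFIG_CONN_MEM_RT_OFFSET + 2 * (16 + 1).
 */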
1613
1614 static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
1615 {
1616 }
1617
1618 void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
1619 {
1620         /* CDU configuration */
1621         ecore_cdu_init_common(p_hwfn);
1622         ecore_prs_init_common(p_hwfn);
1623 }
1624
1625 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
1626 {
1627         ecore_qm_init_pf(p_hwfn);
1628         ecore_cm_init_pf(p_hwfn);
1629         ecore_dq_init_pf(p_hwfn);
1630         ecore_cdu_init_pf(p_hwfn);
1631         ecore_ilt_init_pf(p_hwfn);
1632         ecore_src_init_pf(p_hwfn);
1633         ecore_tm_init_pf(p_hwfn);
1634 }
1635
1636 enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
1637                                            enum protocol_type type, u32 *p_cid)
1638 {
1639         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1640         u32 rel_cid;
1641
1642         if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
1643                 DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
1644                 return ECORE_INVAL;
1645         }
1646
1647         rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_mngr->acquired[type].cid_map,
1648                                            p_mngr->acquired[type].max_count);
1649
1650         if (rel_cid >= p_mngr->acquired[type].max_count) {
1651                 DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
1652                           type);
1653                 return ECORE_NORESOURCES;
1654         }
1655
1656         OSAL_SET_BIT(rel_cid, p_mngr->acquired[type].cid_map);
1657
1658         *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
1659
1660         return ECORE_SUCCESS;
1661 }
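
/* A minimal usage sketch for the CID bitmap API (error handling elided;
 * PROTOCOLID_ETH is one of the protocol types configured in this file):
 *
 *      u32 cid;
 *
 *      if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid) ==
 *          ECORE_SUCCESS) {
 *              // ... use the absolute cid ...
 *              ecore_cxt_release_cid(p_hwfn, cid);
 *      }
 */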
1662
1663 static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
1664                                         u32 cid, enum protocol_type *p_type)
1665 {
1666         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1667         struct ecore_cid_acquired_map *p_map;
1668         enum protocol_type p;
1669         u32 rel_cid;
1670
1671         /* Iterate over protocols and find matching cid range */
1672         for (p = 0; p < MAX_CONN_TYPES; p++) {
1673                 p_map = &p_mngr->acquired[p];
1674
1675                 if (!p_map->cid_map)
1676                         continue;
1677                 if (cid >= p_map->start_cid &&
1678                     cid < p_map->start_cid + p_map->max_count) {
1679                         break;
1680                 }
1681         }
1682         *p_type = p;
1683
1684         if (p == MAX_CONN_TYPES) {
1685                 DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
1686                 return false;
1687         }
1688         rel_cid = cid - p_map->start_cid;
1689         if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
1690                 DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
1691                 return false;
1692         }
1693         return true;
1694 }
1695
1696 void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
1697 {
1698         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1699         enum protocol_type type;
1700         bool b_acquired;
1701         u32 rel_cid;
1702
1703         /* Test acquired and find matching per-protocol map */
1704         b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, &type);
1705
1706         if (!b_acquired)
1707                 return;
1708
1709         rel_cid = cid - p_mngr->acquired[type].start_cid;
1710         OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map);
1711 }
1712
1713 enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
1714                                             struct ecore_cxt_info *p_info)
1715 {
1716         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1717         u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1718         enum protocol_type type;
1719         bool b_acquired;
1720
1721         /* Test acquired and find matching per-protocol map */
1722         b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
1723
1724         if (!b_acquired)
1725                 return ECORE_INVAL;
1726
1727         /* set the protocol type */
1728         p_info->type = type;
1729
1730         /* compute context virtual pointer */
1731         hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1732
1733         conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1734         cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1735         line = p_info->iid / cxts_per_p;
1736
1737         /* Make sure context is allocated (dynamic allocation) */
1738         if (!p_mngr->ilt_shadow[line].p_virt)
1739                 return ECORE_INVAL;
1740
1741         p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
1742             (p_info->iid % cxts_per_p) * conn_cxt_size;
1743
1744         DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
1745                 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1746                 (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
1747
1748         return ECORE_SUCCESS;
1749 }
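
/* Illustrative arithmetic (hypothetical sizes): with the default hw_p_size
 * of 3 an ILT page holds 32768 bytes; if CONN_CXT_SIZE() were 320 bytes,
 * cxts_per_p = 32768 / 320 = 102, so iid 250 resolves to shadow line
 * 250 / 102 = 2 at byte offset (250 % 102) * 320 = 46 * 320 into that page.
 */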
1750
1751 enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
1752 {
1753         /* Set the number of required CORE connections */
1754         u32 core_cids = 1;      /* SPQ */
1755
1756         ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
1757
1758         switch (p_hwfn->hw_info.personality) {
1759         case ECORE_PCI_ETH:
1760                 {
1761                         struct ecore_eth_pf_params *p_params =
1762                             &p_hwfn->pf_params.eth_pf_params;
1763
1764                         ecore_cxt_set_proto_cid_count(p_hwfn,
1765                                 PROTOCOLID_ETH,
1766                                 p_params->num_cons, 1); /* FIXME VF count... */
1767
1768                         break;
1769                 }
1770         default:
1771                 return ECORE_INVAL;
1772         }
1773
1774         return ECORE_SUCCESS;
1775 }
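
/* A minimal usage sketch (hypothetical count): the PMD is expected to fill
 * in pf_params before the context manager is set up, e.g.
 *
 *      p_hwfn->pf_params.eth_pf_params.num_cons = 64;
 *
 * so that ecore_cxt_set_pf_params() sizes PROTOCOLID_ETH accordingly.
 */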
1776
1777 enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
1778                                                 struct ecore_tid_mem *p_info)
1779 {
1780         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1781         u32 proto, seg, total_lines, i, shadow_line;
1782         struct ecore_ilt_client_cfg *p_cli;
1783         struct ecore_ilt_cli_blk *p_fl_seg;
1784         struct ecore_tid_seg *p_seg_info;
1785
1786         /* Verify the personality; a supported personality must set
              * 'proto' and 'seg' before they are used below. None does so
              * yet, so every personality currently returns an error here.
              */
1787         switch (p_hwfn->hw_info.personality) {
1788         default:
1789                 return ECORE_INVAL;
1790         }
1791
1792         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
1793         if (!p_cli->active)
1794                 return ECORE_INVAL;
1795
1796         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
1797         if (!p_seg_info->has_fl_mem)
1798                 return ECORE_INVAL;
1799
1800         p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
1801         total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
1802                                    p_fl_seg->real_size_in_page);
1803
1804         for (i = 0; i < total_lines; i++) {
1805                 shadow_line = i + p_fl_seg->start_line -
1806                     p_hwfn->p_cxt_mngr->pf_start_line;
1807                 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
1808         }
1809         p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
1810             p_fl_seg->real_size_in_page;
1811         p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
1812         p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
1813             p_info->tid_size;
1814
1815         return ECORE_SUCCESS;
1816 }
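
/* Illustrative arithmetic for the waste computation above (hypothetical
 * sizes): with a 32768-byte ILT page and a real_size_in_page of 30720
 * bytes, each block wastes 32768 - 30720 = 2048 bytes; with a 128-byte
 * tid_size such a block holds 30720 / 128 = 240 tids.
 */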
1817
1818 /* This function is very RoCE oriented; if another protocol wants this
1819  * feature in the future, the function will need to be made more generic.
1820  */
1821 static enum _ecore_status_t
1822 ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
1823                          enum ecore_cxt_elem_type elem_type,
1824                          u32 start_iid, u32 count)
1825 {
1826         u32 reg_offset, elem_size, hw_p_size, elems_per_p;
1827         u32 start_line, end_line, shadow_start_line, shadow_end_line;
1828         struct ecore_ilt_client_cfg *p_cli;
1829         struct ecore_ilt_cli_blk *p_blk;
1830         u32 end_iid = start_iid + count;
1831         struct ecore_ptt *p_ptt;
1832         u64 ilt_hw_entry = 0;
1833         u32 i;
1834
1835         if (elem_type == ECORE_ELEM_CXT) {
1836                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1837                 elem_size = CONN_CXT_SIZE(p_hwfn);
1838                 p_blk = &p_cli->pf_blks[CDUC_BLK];
1839         } else {
                     /* Bail out rather than use p_cli/p_blk/elem_size
                      * uninitialized for element types not handled here yet.
                      */
                     DP_NOTICE(p_hwfn, false, "Unsupported elem type %d\n",
                               elem_type);
                     return ECORE_INVAL;
             }
1840
1841         /* Calculate line in ilt */
1842         hw_p_size = p_cli->p_size.val;
1843         elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
1844         start_line = p_blk->start_line + (start_iid / elems_per_p);
1845         end_line = p_blk->start_line + (end_iid / elems_per_p);
1846         if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
1847                 end_line--;
1848
1849         shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
1850         shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
1851
1852         p_ptt = ecore_ptt_acquire(p_hwfn);
1853         if (!p_ptt) {
1854                 DP_NOTICE(p_hwfn, false,
1855                           "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
1856                 return ECORE_TIMEOUT;
1857         }
1858
1859         for (i = shadow_start_line; i < shadow_end_line; i++) {
1860                 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
1861                         continue;
1862
1863                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1864                                        p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
1865                                        p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
1866                                        p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
1867
1868                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
1869                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
1870                 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
1871
1872                 /* compute absolute offset */
1873                 reg_offset = PSWRQ2_REG_ILT_MEMORY +
1874                     ((start_line++) * ILT_REG_SIZE_IN_BYTES *
1875                      ILT_ENTRY_IN_REGS);
1876
1877                 ecore_wr(p_hwfn, p_ptt, reg_offset, U64_LO(ilt_hw_entry));
1878                 ecore_wr(p_hwfn, p_ptt, reg_offset + ILT_REG_SIZE_IN_BYTES,
1879                          U64_HI(ilt_hw_entry));
1880         }
1881
1882         ecore_ptt_release(p_hwfn, p_ptt);
1883
1884         return ECORE_SUCCESS;
1885 }
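
/* Illustrative arithmetic for the line computation above (hypothetical
 * values): with elems_per_p = 102, start_iid = 204 and count = 102, the
 * freed range covers iids 204..305, i.e. exactly line 2 of the block.
 * start_line lands at start + 2 and end_iid = 306 gives end_line =
 * start + 3; since (306 + 1) / 102 equals 306 / 102, no decrement occurs
 * and the loop frees shadow lines [start + 2, start + 3) - line 2 only.
 */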
1886
1887 enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
1888                                               enum protocol_type proto)
1889 {
1890         enum _ecore_status_t rc;
1891         u32 cid;
1892
1893         /* Free Connection CXT */
1894         rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
1895                                       ecore_cxt_get_proto_cid_start(p_hwfn,
1896                                                                     proto),
1897                                       ecore_cxt_get_proto_cid_count(p_hwfn,
1898                                                                     proto,
1899                                                                     &cid));
1900
1901         if (rc)
1902                 return rc;
1903
1904         /* Free Task CXT */
1905         rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
1906                                       ecore_cxt_get_proto_tid_count(p_hwfn,
1907                                                                     proto));
1908
1909         return rc;
1910 }
1911
1912 enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
1913                                             u32 tid,
1914                                             u8 ctx_type, void **pp_task_ctx)
1915 {
1916         struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1917         struct ecore_ilt_client_cfg *p_cli;
1918         struct ecore_ilt_cli_blk *p_seg;
1919         struct ecore_tid_seg *p_seg_info;
1920         u32 proto, seg;
1921         u32 total_lines;
1922         u32 tid_size, ilt_idx;
1923         u32 num_tids_per_block;
1924
1925         /* Verify the personality; a supported personality must set
              * 'proto' and 'seg' before they are used below. None does so
              * yet, so every personality currently returns an error here.
              */
1926         switch (p_hwfn->hw_info.personality) {
1927         default:
1928                 return ECORE_INVAL;
1929         }
1930
1931         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
1932         if (!p_cli->active)
1933                 return ECORE_INVAL;
1934
1935         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
1936
1937         if (ctx_type == ECORE_CTX_WORKING_MEM) {
1938                 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
1939         } else if (ctx_type == ECORE_CTX_FL_MEM) {
1940                 if (!p_seg_info->has_fl_mem)
1941                         return ECORE_INVAL;
1942                 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
1943         } else {
1944                 return ECORE_INVAL;
1945         }
1946         total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
1947         tid_size = p_mngr->task_type_size[p_seg_info->type];
1948         num_tids_per_block = p_seg->real_size_in_page / tid_size;
1949
1950         if (total_lines <= tid / num_tids_per_block)
1951                 return ECORE_INVAL;
1952
1953         ilt_idx = tid / num_tids_per_block + p_seg->start_line -
1954             p_mngr->pf_start_line;
1955         *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
1956             (tid % num_tids_per_block) * tid_size;
1957
1958         return ECORE_SUCCESS;
1959 }
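
/* Illustrative arithmetic (hypothetical sizes): with num_tids_per_block =
 * 240 and tid = 500, the tid lives 500 / 240 = 2 lines into the segment,
 * at byte offset (500 % 240) * tid_size = 20 * tid_size within that page.
 */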