qede: add SRIOV support
[dpdk.git] / drivers / net / qede / base / ecore_dev.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"

/* Configurable */
#define ECORE_MIN_DPIS          (4)     /* The minimal number of DPIs required
                                         * to load the driver. The number was
                                         * arbitrarily set.
                                         */

/* Derived */
#define ECORE_MIN_PWM_REGION    ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
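/* Illustration (not from this file): if ECORE_WID_SIZE is 1kB, the minimal
 * PWM region would be 4 * 1kB = 4kB - one WID-sized window per minimal DPI.
 */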

enum BAR_ID {
        BAR_ID_0,               /* Used for GRC */
        BAR_ID_1                /* Used for doorbells */
};

static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
{
        u32 bar_reg = (bar_id == BAR_ID_0 ?
                       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
        u32 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);

        /* These registers were historically updated only in CMT mode. Since
         * they proved useful, the MFW updates them starting from version
         * 8.7.7.0. Older MFW versions leave them at 0, meaning
         * "not configured".
         */
        if (!val) {
                if (p_hwfn->p_dev->num_hwfns > 1) {
                        DP_NOTICE(p_hwfn, false,
                                  "BAR size not configured. Assuming BAR"
                                  " size of 256kB for GRC and 512kB for DB\n");
                        return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
                }

                DP_NOTICE(p_hwfn, false,
                          "BAR size not configured. Assuming BAR"
                          " size of 512kB for GRC and 512kB for DB\n");
                return 512 * 1024;
        }

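        /* The register encodes the BAR size as a power of two above 32kB:
         * e.g. val = 3 -> 1 << (3 + 15) = 256kB, val = 4 -> 512kB.
         */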
        return 1 << (val + 15);
}

void ecore_init_dp(struct ecore_dev *p_dev,
                   u32 dp_module, u8 dp_level, void *dp_ctx)
{
        u32 i;

        p_dev->dp_level = dp_level;
        p_dev->dp_module = dp_module;
        p_dev->dp_ctx = dp_ctx;
        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                p_hwfn->dp_level = dp_level;
                p_hwfn->dp_module = dp_module;
                p_hwfn->dp_ctx = dp_ctx;
        }
}

void ecore_init_struct(struct ecore_dev *p_dev)
{
        u8 i;

        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                p_hwfn->p_dev = p_dev;
                p_hwfn->my_id = i;
                p_hwfn->b_active = false;

                OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
                OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
        }

        /* hwfn 0 is always active */
        p_dev->hwfns[0].b_active = true;

        /* set the default cache alignment to 128 (may be overridden later) */
        p_dev->cache_shift = 7;
}

static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

        OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
        qm_info->qm_pq_params = OSAL_NULL;
        OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
        qm_info->qm_vport_params = OSAL_NULL;
        OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
        qm_info->qm_port_params = OSAL_NULL;
        OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
        qm_info->wfq_data = OSAL_NULL;
}

void ecore_resc_free(struct ecore_dev *p_dev)
{
        int i;

        if (IS_VF(p_dev))
                return;

        OSAL_FREE(p_dev, p_dev->fw_data);
        p_dev->fw_data = OSAL_NULL;

        OSAL_FREE(p_dev, p_dev->reset_stats);
        p_dev->reset_stats = OSAL_NULL;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
                p_hwfn->p_tx_cids = OSAL_NULL;
                OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
                p_hwfn->p_rx_cids = OSAL_NULL;
        }

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                ecore_cxt_mngr_free(p_hwfn);
                ecore_qm_info_free(p_hwfn);
                ecore_spq_free(p_hwfn);
                ecore_eq_free(p_hwfn, p_hwfn->p_eq);
                ecore_consq_free(p_hwfn, p_hwfn->p_consq);
                ecore_int_free(p_hwfn);
                ecore_iov_free(p_hwfn);
                ecore_dmae_info_free(p_hwfn);
                /* @@@TBD Flush work-queue ? */
        }
}

static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
                                               bool b_sleepable)
{
        u8 num_vports, vf_offset = 0, i, vport_id, num_ports;
        struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u16 num_pqs, multi_cos_tcs = 1;
#ifdef CONFIG_ECORE_SRIOV
        u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
#else
        u16 num_vfs = 0;
#endif

        OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));

#ifndef ASIC_ONLY
        /* @TMP - Don't allocate QM queues for VFs on emulation */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false,
                          "Emulation - skip configuring QM queues for VFs\n");
                num_vfs = 0;
        }
#endif

        num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
        num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);

        /* Sanity check that the setup requests a legal number of resources */
        if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
                DP_ERR(p_hwfn,
                       "Need too many Physical queues - 0x%04x when"
                        " only 0x%04x are available\n",
                       num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
                return ECORE_INVAL;
        }

        /* PQs will be arranged as follows: First per-TC PQ, then pure-LB
         * queue, then special queues, then per-VF PQ.
         */
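        /* Layout illustration (not from the source): with multi_cos_tcs == 1
         * and num_vfs == 2, num_pqs == 4 and the array holds
         * [0] per-TC PQ, [1] pure-LB PQ, [2..3] per-VF PQs,
         * so vf_queues_offset ends up as 2.
         */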
        qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
                                            b_sleepable ? GFP_KERNEL :
                                            GFP_ATOMIC,
                                            sizeof(struct init_qm_pq_params) *
                                            num_pqs);
        if (!qm_info->qm_pq_params)
                goto alloc_err;

        qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
                                               b_sleepable ? GFP_KERNEL :
                                               GFP_ATOMIC,
                                               sizeof(struct
                                                      init_qm_vport_params) *
                                               num_vports);
        if (!qm_info->qm_vport_params)
                goto alloc_err;

        qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
                                              b_sleepable ? GFP_KERNEL :
                                              GFP_ATOMIC,
                                              sizeof(struct init_qm_port_params)
                                              * MAX_NUM_PORTS);
        if (!qm_info->qm_port_params)
                goto alloc_err;

        qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
                                        b_sleepable ? GFP_KERNEL :
                                        GFP_ATOMIC,
                                        sizeof(struct ecore_wfq_data) *
                                        num_vports);

        if (!qm_info->wfq_data)
                goto alloc_err;

        vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);

        /* First init per-TC PQs */
        for (i = 0; i < multi_cos_tcs; i++) {
                struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

                if (p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
                        params->vport_id = vport_id;
                        params->tc_id = p_hwfn->hw_info.non_offload_tc;
                        params->wrr_group = 1;  /* @@@TBD ECORE_WRR_MEDIUM */
                } else {
                        params->vport_id = vport_id;
                        params->tc_id = p_hwfn->hw_info.offload_tc;
                        params->wrr_group = 1;  /* @@@TBD ECORE_WRR_MEDIUM */
                }
        }

        /* Then init pure-LB PQ */
        qm_info->pure_lb_pq = i;
        qm_info->qm_pq_params[i].vport_id =
            (u8)RESC_START(p_hwfn, ECORE_VPORT);
        qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
        qm_info->qm_pq_params[i].wrr_group = 1;
        i++;

        /* Then init per-VF PQs */
        vf_offset = i;
        for (i = 0; i < num_vfs; i++) {
                /* First vport is used by the PF */
                qm_info->qm_pq_params[vf_offset + i].vport_id = vport_id +
                    i + 1;
                qm_info->qm_pq_params[vf_offset + i].tc_id =
                    p_hwfn->hw_info.non_offload_tc;
                qm_info->qm_pq_params[vf_offset + i].wrr_group = 1;
        }

        qm_info->vf_queues_offset = vf_offset;
        qm_info->num_pqs = num_pqs;
        qm_info->num_vports = num_vports;

        /* Initialize qm port parameters */
        num_ports = p_hwfn->p_dev->num_ports_in_engines;
        for (i = 0; i < num_ports; i++) {
                p_qm_port = &qm_info->qm_port_params[i];
                p_qm_port->active = 1;
                if (num_ports == 4)
                        p_qm_port->num_active_phys_tcs = 2;
                else
                        p_qm_port->num_active_phys_tcs = 5;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }
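        /* Note: PBF command lines and BTB blocks are split evenly across the
         * engine's active ports; e.g. with two ports each port gets half of
         * PBF_MAX_CMD_LINES and half of BTB_MAX_BLOCKS.
         */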

        if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
                qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
        else
                qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

        qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);

        qm_info->num_vf_pqs = num_vfs;
        qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);

        for (i = 0; i < qm_info->num_vports; i++)
                qm_info->qm_vport_params[i].vport_wfq = 1;

        qm_info->pf_wfq = 0;
        qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;
        qm_info->vport_wfq_en = 1;

        return ECORE_SUCCESS;

alloc_err:
        DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
        ecore_qm_info_free(p_hwfn);
        return ECORE_NOMEM;
}

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
        enum _ecore_status_t rc;
        bool b_rc;

        /* qm_info is allocated in ecore_init_qm_info() which is already called
         * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
         * The allocated size may change each init, so we free it before next
         * allocation.
         */
        ecore_qm_info_free(p_hwfn);

        /* initialize ecore's qm data structure */
        rc = ecore_init_qm_info(p_hwfn, false);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* stop PF's qm queues */
        b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
                                      qm_info->start_pq, qm_info->num_pqs);
        if (!b_rc)
                return ECORE_INVAL;

        /* clear the QM_PF runtime phase leftovers from previous init */
        ecore_init_clear_rt_data(p_hwfn);

        /* prepare QM portion of runtime array */
        ecore_qm_init_pf(p_hwfn);

        /* activate init tool on runtime array */
        rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
                            p_hwfn->hw_info.hw_mode);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* start PF's qm queues */
        b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
                                      qm_info->start_pq, qm_info->num_pqs);
        if (!b_rc)
                return ECORE_INVAL;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_consq *p_consq;
        struct ecore_eq *p_eq;
        int i;

        if (IS_VF(p_dev))
                return rc;

        p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
                                     sizeof(struct ecore_fw_data));
        if (!p_dev->fw_data)
                return ECORE_NOMEM;

        /* Allocate Memory for the Queue->CID mapping */
        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                /* @@@TMP - resc management, change to actual required size */
                int tx_size = sizeof(struct ecore_hw_cid_data) *
                    RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
                int rx_size = sizeof(struct ecore_hw_cid_data) *
                    RESC_NUM(p_hwfn, ECORE_L2_QUEUE);

                p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                                tx_size);
                if (!p_hwfn->p_tx_cids) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate memory for Tx Cids\n");
                        goto alloc_no_mem;
                }

                p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                                rx_size);
                if (!p_hwfn->p_rx_cids) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate memory for Rx Cids\n");
                        goto alloc_no_mem;
                }
        }

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                /* First allocate the context manager structure */
                rc = ecore_cxt_mngr_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Set the HW cid/tid numbers (in the context manager).
                 * Must be done prior to any further computations.
                 */
                rc = ecore_cxt_set_pf_params(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Prepare and process QM requirements */
                rc = ecore_init_qm_info(p_hwfn, true);
                if (rc)
                        goto alloc_err;

                /* Compute the ILT client partition */
                rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* CID map / ILT shadow table / T2
                 * The table sizes are determined by the computations above
                 */
                rc = ecore_cxt_tables_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SPQ, must follow ILT because it initializes SPQ context */
                rc = ecore_spq_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SP status block allocation */
                p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
                                                           RESERVED_PTT_DPC);

                rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
                if (rc)
                        goto alloc_err;

                rc = ecore_iov_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* EQ */
                p_eq = ecore_eq_alloc(p_hwfn, 256);
                if (!p_eq)
                        goto alloc_no_mem;
                p_hwfn->p_eq = p_eq;

                p_consq = ecore_consq_alloc(p_hwfn);
                if (!p_consq)
                        goto alloc_no_mem;
                p_hwfn->p_consq = p_consq;

                /* DMA info initialization */
                rc = ecore_dmae_info_alloc(p_hwfn);
                if (rc) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate memory for"
                                  " dmae_info structure\n");
                        goto alloc_err;
                }
        }

        p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
                                         sizeof(struct ecore_eth_stats));
        if (!p_dev->reset_stats) {
                DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
                goto alloc_no_mem;
        }

        return ECORE_SUCCESS;

alloc_no_mem:
        rc = ECORE_NOMEM;
alloc_err:
        ecore_resc_free(p_dev);
        return rc;
}

void ecore_resc_setup(struct ecore_dev *p_dev)
{
        int i;

        if (IS_VF(p_dev))
                return;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                ecore_cxt_mngr_setup(p_hwfn);
                ecore_spq_setup(p_hwfn);
                ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
                ecore_consq_setup(p_hwfn, p_hwfn->p_consq);

                /* Read shadow of current MFW mailbox */
                ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
                OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
                            p_hwfn->mcp_info->mfw_mb_cur,
                            p_hwfn->mcp_info->mfw_mb_length);

                ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);

                ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
        }
}

#define FINAL_CLEANUP_POLL_CNT  (100)
#define FINAL_CLEANUP_POLL_TIME (10)
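/* Poll budget: FINAL_CLEANUP_POLL_CNT iterations of FINAL_CLEANUP_POLL_TIME
 * milliseconds each, i.e. up to ~1 second for the FW acknowledgment.
 */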
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         u16 id, bool is_vf)
{
        u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
        enum _ecore_status_t rc = ECORE_TIMEOUT;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
            CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
                return ECORE_SUCCESS;
        }
#endif

        addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

        if (is_vf)
                id += 0x10;

        command |= X_FINAL_CLEANUP_AGG_INT <<
            SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
        command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
        command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
        command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

        /* Make sure notification is not set before initiating final cleanup */
        if (REG_RD(p_hwfn, addr)) {
                DP_NOTICE(p_hwfn, false,
                          "Unexpected; Found final cleanup notification "
                          "before initiating final cleanup\n");
                REG_WR(p_hwfn, addr, 0);
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
                   id, OSAL_CPU_TO_LE32(command));

        ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
                 OSAL_CPU_TO_LE32(command));

        /* Poll until completion */
        while (!REG_RD(p_hwfn, addr) && count--)
                OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);

        if (REG_RD(p_hwfn, addr))
                rc = ECORE_SUCCESS;
        else
                DP_NOTICE(p_hwfn, true,
                          "Failed to receive FW final cleanup notification\n");

        /* Cleanup afterwards */
        REG_WR(p_hwfn, addr, 0);

        return rc;
}

static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
{
        int hw_mode = 0;

        switch (ECORE_GET_TYPE(p_hwfn->p_dev)) {
        case CHIP_BB_A0:
                hw_mode |= 1 << MODE_BB_A0;
                break;
        case CHIP_BB_B0:
                hw_mode |= 1 << MODE_BB_B0;
                break;
        case CHIP_K2:
                hw_mode |= 1 << MODE_K2;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Can't initialize chip ID %d\n",
                          ECORE_GET_TYPE(p_hwfn->p_dev));
                return;
        }

        /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
        switch (p_hwfn->p_dev->num_ports_in_engines) {
        case 1:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
                break;
        case 2:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
                break;
        case 4:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
                break;
        default:
                DP_NOTICE(p_hwfn, true,
                          "num_ports_in_engine = %d not supported\n",
                          p_hwfn->p_dev->num_ports_in_engines);
                return;
        }

        switch (p_hwfn->p_dev->mf_mode) {
        case ECORE_MF_DEFAULT:
        case ECORE_MF_NPAR:
                hw_mode |= 1 << MODE_MF_SI;
                break;
        case ECORE_MF_OVLAN:
                hw_mode |= 1 << MODE_MF_SD;
                break;
        default:
                DP_NOTICE(p_hwfn, true,
                          "Unsupported MF mode, init as DEFAULT\n");
                hw_mode |= 1 << MODE_MF_SI;
        }

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
                        hw_mode |= 1 << MODE_FPGA;
                } else {
                        if (p_hwfn->p_dev->b_is_emul_full)
                                hw_mode |= 1 << MODE_EMUL_FULL;
                        else
                                hw_mode |= 1 << MODE_EMUL_REDUCED;
                }
        } else
#endif
                hw_mode |= 1 << MODE_ASIC;

        if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
                hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;

        if (p_hwfn->p_dev->num_hwfns > 1)
                hw_mode |= 1 << MODE_100G;

        p_hwfn->hw_info.hw_mode = hw_mode;

        DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
                   "Configuring function for hw_mode: 0x%08x\n",
                   p_hwfn->hw_info.hw_mode);
}

#ifndef ASIC_ONLY
/* MFW-replacement initializations for non-ASIC */
static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt)
{
        u32 pl_hv = 1;
        int i;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
                pl_hv |= 0x600;

        ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
                ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);

        /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
        /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
        if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
                ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
                /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
                ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
                         (p_hwfn->p_dev->num_ports_in_engines >> 1));

                ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
                         p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
        }

        /* Poll on RBC */
        ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
        for (i = 0; i < 100; i++) {
                OSAL_UDELAY(50);
                if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
                        break;
        }
        if (i == 100)
                DP_NOTICE(p_hwfn, true,
                          "RBC done failed to complete in PSWRQ2\n");
}
#endif

/* Init run time data for all PFs and their VFs on an engine.
 * TBD - for VFs - once parent-PF info for each VF is available in shmem,
 * since CAU requires knowledge of the parent PF for each VF.
 */
static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
{
        u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
        int i, sb_id;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
                struct ecore_igu_info *p_igu_info;
                struct ecore_igu_block *p_block;
                struct cau_sb_entry sb_entry;

                p_igu_info = p_hwfn->hw_info.p_igu_info;

                for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
                     sb_id++) {
                        p_block = &p_igu_info->igu_map.igu_blocks[sb_id];

                        if (!p_block->is_pf)
                                continue;

                        ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
                                                p_block->function_id, 0, 0);
                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
                }
        }
}

static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 int hw_mode)
{
        struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        u8 vf_id, max_num_vfs;
        u16 num_pfs, pf_id;
        u32 concrete_fid;

        ecore_init_cau_rt_data(p_dev);

        /* Program GTT windows */
        ecore_gtt_init(p_hwfn);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
#endif

        if (p_hwfn->mcp_info) {
                if (p_hwfn->mcp_info->func_info.bandwidth_max)
                        qm_info->pf_rl_en = 1;
                if (p_hwfn->mcp_info->func_info.bandwidth_min)
                        qm_info->pf_wfq_en = 1;
        }

        ecore_qm_common_rt_init(p_hwfn,
                                p_hwfn->p_dev->num_ports_in_engines,
                                qm_info->max_phys_tcs_per_port,
                                qm_info->pf_rl_en, qm_info->pf_wfq_en,
                                qm_info->vport_rl_en, qm_info->vport_wfq_en,
                                qm_info->qm_port_params);

        ecore_cxt_hw_init_common(p_hwfn);

        /* Close gate from NIG to BRB/Storm; By default they are open, but
         * we close them to prevent NIG from passing data to reset blocks.
         * Should have been done in the ENGINE phase, but init-tool lacks
         * proper port-pretend capabilities.
         */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        ecore_port_unpretend(p_hwfn, p_ptt);

        rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
         * need to decide with which value, maybe runtime
         */
        ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
        ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

        if (ECORE_IS_BB(p_hwfn->p_dev)) {
                num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
                if (num_pfs == 1)
                        return rc;
                /* pretend to original PF */
                ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
        }

        /* Workaround for avoiding a CCFC execution error when getting packets
         * with CRC errors, instead allowing the invocation of the FW error
         * handler.
         * This is not done inside the init tool since it currently can't
         * perform pretending to VFs.
         */
        max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
            : MAX_NUM_VFS_BB;
        for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
                concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
                ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
                ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
        }
        /* pretend to original PF */
        ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

        return rc;
}

#ifndef ASIC_ONLY
#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)

#define PMEG_IF_BYTE_COUNT      8

static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
                             struct ecore_ptt *p_ptt,
                             u32 addr, u64 data, u8 reg_type, u8 port)
{
        DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                   "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
                   ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
                   (8 << PMEG_IF_BYTE_COUNT),
                   (reg_type << 25) | (addr << 8) | port,
                   (u32)((data >> 32) & 0xffffffff),
                   (u32)(data & 0xffffffff));

        ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
                 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
                  0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
        ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
                 (reg_type << 25) | (addr << 8) | port);
        ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
                 data & 0xffffffff);
        ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
                 (data >> 32) & 0xffffffff);
}

#define XLPORT_MODE_REG (0x20a)
#define XLPORT_MAC_CONTROL (0x210)
#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
#define XLPORT_ENABLE_REG (0x20b)

#define XLMAC_CTRL (0x600)
#define XLMAC_MODE (0x601)
#define XLMAC_RX_MAX_SIZE (0x608)
#define XLMAC_TX_CTRL (0x604)
#define XLMAC_PAUSE_CTRL (0x60d)
#define XLMAC_PFC_CTRL (0x60e)

static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt)
{
        u8 port = p_hwfn->port_id;
        u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;

        ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
                 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
                 (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
                 | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));

        ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
                 1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);

        ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
                 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);

        ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
                 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);

        ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
                 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);

        ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
                 (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
                 (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));

        ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
}

static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
                                 struct ecore_ptt *p_ptt)
{
        u8 loopback = 0, port = p_hwfn->port_id * 2;

        DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);

        if (ECORE_IS_AH(p_hwfn->p_dev)) {
                ecore_emul_link_init_ah(p_hwfn, p_ptt);
                return;
        }

        ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
                                port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
                         0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
                         0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
                         0x30ffffc000ULL, 0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
                        port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
                        0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
        ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}

static void ecore_link_init(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt, u8 port)
{
        int port_offset = port ? 0x800 : 0;
        u32 xmac_rxctrl = 0;

        /* Reset of XMAC */
        /* FIXME: move to common start */
        ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
                MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
        OSAL_MSLEEP(1);
        ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
                MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */

        ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);

        /* Set the number of ports on the Warp Core to 10G */
        ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);

        /* Soft reset of XMAC */
        ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
                 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
        OSAL_MSLEEP(1);
        ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
                 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);

        /* FIXME: move to common end */
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);

        /* Set max packet size for the given port */
        ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);

        /* CRC append for Tx packets on the given port */
        ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);

        /* Enable Tx and Rx on the given port */
        ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
                 XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
        xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
        xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
        ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
}
#endif

static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
                                               struct ecore_ptt *p_ptt,
                                               int hw_mode)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Init sequence */
        rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
                            hw_mode);
        if (rc != ECORE_SUCCESS)
                return rc;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
                return ECORE_SUCCESS;

        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
                if (ECORE_IS_AH(p_hwfn->p_dev))
                        return ECORE_SUCCESS;
                ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
        } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                if (p_hwfn->p_dev->num_hwfns > 1) {
                        /* Activate OPTE in CMT */
                        u32 val;

                        val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
                        val |= 0x10;
                        ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
                        ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
                        ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
                        ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
                        ecore_wr(p_hwfn, p_ptt,
                                 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
                        ecore_wr(p_hwfn, p_ptt,
                                 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
                        ecore_wr(p_hwfn, p_ptt,
                                 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
                                 0x55555555);
                }

                ecore_emul_link_init(p_hwfn, p_ptt);
        } else {
                DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
        }
#endif

        return rc;
}
static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt)
{
        u32 pwm_regsize, norm_regsize;
        u32 non_pwm_conn, min_addr_reg1;
        u32 db_bar_size, n_cpus;
        u32 pf_dems_shift;
        int rc = ECORE_SUCCESS;

        db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
        if (p_hwfn->p_dev->num_hwfns > 1)
                db_bar_size /= 2;

        /* Calculate doorbell regions
         * -----------------------------------
         * The doorbell BAR is made of two regions. The first is called normal
         * region and the second is called PWM region. In the normal region
         * each ICID has its own set of addresses so that writing to that
         * specific address identifies the ICID. In the Process Window Mode
         * region the ICID is given in the data written to the doorbell. The
         * DORQ_REG_PF_MIN_ADDR_REG1 per-PF register denotes the offset in the
         * doorbell BAR at which the PWM region begins.
         * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
         * non-PWM connection. The calculation below computes the total non-PWM
         * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
         * in units of 4,096 bytes.
         */
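        /* Worked example (illustrative numbers only): with 96 non-PWM
         * connections and ECORE_PF_DEMS_SIZE == 4, norm_regsize becomes
         * ROUNDUP(4 * 96, 4096) = 4096, so min_addr_reg1 == 1 and the PWM
         * region starts one 4kB page into the doorbell BAR.
         */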
        non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
            ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
                                          OSAL_NULL) +
            ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
        norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
        min_addr_reg1 = norm_regsize / 4096;
        pwm_regsize = db_bar_size - norm_regsize;

        /* Check that the normal and PWM sizes are valid */
        if (db_bar_size < norm_regsize) {
                DP_ERR(p_hwfn->p_dev,
                       "Doorbell BAR size 0x%x is too"
                       " small (normal region is 0x%x)\n",
                       db_bar_size, norm_regsize);
                return ECORE_NORESOURCES;
        }
        if (pwm_regsize < ECORE_MIN_PWM_REGION) {
                DP_ERR(p_hwfn->p_dev,
                       "PWM region size 0x%x is too small."
                       " Should be at least 0x%x (Doorbell BAR size"
                       " is 0x%x and normal region size is 0x%x)\n",
                       pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
                       norm_regsize);
                return ECORE_NORESOURCES;
        }

        /* Update hwfn */
        p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
                                                  * calculate the doorbell
                                                  * address
                                                  */

        /* Update registers */
        /* DEMS size is configured as log2 of DWORDs, hence the division by 4 */
        pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
        ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
        ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

        DP_INFO(p_hwfn,
                "Doorbell size 0x%x, Normal region 0x%x, PWM region 0x%x\n",
                db_bar_size, norm_regsize, pwm_regsize);
        DP_INFO(p_hwfn, "DPI size 0x%x, DPI count 0x%x\n", p_hwfn->dpi_size,
                p_hwfn->dpi_count);

        return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
                 struct ecore_ptt *p_ptt,
                 struct ecore_tunn_start_params *p_tunn,
                 int hw_mode,
                 bool b_hw_start,
                 enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u8 rel_pf_id = p_hwfn->rel_pf_id;
        u32 prs_reg;
        u16 ctrl;
        int pos;

        /* ILT/DQ/CM/QM */
        if (p_hwfn->mcp_info) {
                struct ecore_mcp_function_info *p_info;

                p_info = &p_hwfn->mcp_info->func_info;
                if (p_info->bandwidth_min)
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

                /* Update the rate limit once we actually have a link */
                p_hwfn->qm_info.pf_rl = 100;
        }
        ecore_cxt_hw_init_pf(p_hwfn);

        ecore_int_igu_init_rt(p_hwfn);  /* @@@TBD TODO MichalS multi hwfn ?? */

        /* Set VLAN in NIG if needed */
        if (hw_mode & (1 << MODE_MF_SD)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
                             p_hwfn->hw_info.ovlan);
        }

        /* Enable classification by MAC if needed */
        if (hw_mode & (1 << MODE_MF_SI)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                           "Configuring TAGMAC_CLS_TYPE\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
                             1);
        }

        /* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);

        /* perform debug configuration when chip is out of reset */
        OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);

        /* Clean up the chip from a previous driver, if remains exist */
        rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
        if (rc != ECORE_SUCCESS) {
                ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
                return rc;
        }

        /* PF Init sequence */
        rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
        rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* Pure runtime initializations - directly to the HW  */
        ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

        /* PCI relaxed ordering causes a decrease in the performance on some
         * systems. Till a root cause is found, disable this attribute in the
         * PCI config space.
         */
        /* Not in use @DPDK
         * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
         * if (!pos) {
         *      DP_NOTICE(p_hwfn, true,
         *                "Failed to find the PCI Express"
         *                " Capability structure in the PCI config space\n");
         *      return ECORE_IO;
         * }
         * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
         *                           &ctrl);
         * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
         * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
         *                           &ctrl);
         */

        rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
        if (rc)
                return rc;

        if (b_hw_start) {
                /* enable interrupts */
                ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);

                /* send function start command */
                rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
                                       allow_npar_tx_switch);
                if (rc) {
                        DP_NOTICE(p_hwfn, true,
                                  "Function start ramrod failed\n");
                } else {
                        prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
                        DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
                                   "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);

                        DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
                                   "PRS_REG_SEARCH registers after PF start\n");
                        prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
                        DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
                                   "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
                        prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
                        DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
                                   "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
                        prs_reg = ecore_rd(p_hwfn, p_ptt,
                                           PRS_REG_SEARCH_TCP_FIRST_FRAG);
                        DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
                                   "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
                                   prs_reg);
                        prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
                        DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
                                   "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
                }
        }
        return rc;
}

static enum _ecore_status_t
ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
                      struct ecore_ptt *p_ptt, u8 enable)
{
        u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

        /* Change PF in PXP */
        ecore_wr(p_hwfn, p_ptt,
                 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

        /* wait until value is set - try for 1 second every 50us */
        for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
                val = ecore_rd(p_hwfn, p_ptt,
                               PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
                if (val == set_val)
                        break;

                OSAL_UDELAY(50);
        }

        if (val != set_val) {
                DP_NOTICE(p_hwfn, true,
                          "PFID_ENABLE_MASTER wasn't changed after a second\n");
                return ECORE_UNKNOWN_ERROR;
        }

        return ECORE_SUCCESS;
}

static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_main_ptt)
{
        /* Read shadow of current MFW mailbox */
        ecore_mcp_read_mb(p_hwfn, p_main_ptt);
        OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
                    p_hwfn->mcp_info->mfw_mb_cur,
                    p_hwfn->mcp_info->mfw_mb_length);
}

enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                                   struct ecore_tunn_start_params *p_tunn,
                                   bool b_hw_start,
                                   enum ecore_int_mode int_mode,
                                   bool allow_npar_tx_switch,
                                   const u8 *bin_fw_data)
{
        enum _ecore_status_t rc, mfw_rc;
        u32 load_code, param;
        int i, j;

        if (IS_PF(p_dev)) {
                rc = ecore_init_fw_data(p_dev, bin_fw_data);
                if (rc != ECORE_SUCCESS)
                        return rc;
        }

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                if (IS_VF(p_dev)) {
                        rc = ecore_vf_pf_init(p_hwfn);
                        if (rc)
                                return rc;
                        continue;
                }

                /* Enable DMAE in PXP */
                rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

                ecore_calc_hw_mode(p_hwfn);
                /* @@@TBD need to add here:
                 * Check for fan failure
                 * Prev_unload
                 */
                rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
                if (rc) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed sending LOAD_REQ command\n");
                        return rc;
                }

                /* CQ75580:
                 * When coming back from hibernate state, the registers from
                 * which the shadow is initially read are not yet initialized.
                 * It turns out that these registers get initialized during the
                 * call to the ecore_mcp_load_req request. So we need to reread
                 * them here to get the proper shadow register value.
                 * Note: This is a workaround for the missing MFW
                 * initialization. It may be removed once the implementation
                 * is done.
                 */
                ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
                           rc, load_code);

                /* Only relevant for recovery:
                 * Clear the indication after the LOAD_REQ command is responded
                 * to by the MFW.
                 */
                p_dev->recov_in_prog = false;

                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);

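                /* The MFW grants one of three load levels, each a superset of
                 * the next: ENGINE init falls through to PORT init, which
                 * falls through to FUNCTION (PF) init.
                 */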
1331                 switch (load_code) {
1332                 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1333                         rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
1334                                                   p_hwfn->hw_info.hw_mode);
1335                         if (rc)
1336                                 break;
1337                         /* Fall through */
1338                 case FW_MSG_CODE_DRV_LOAD_PORT:
1339                         rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
1340                                                 p_hwfn->hw_info.hw_mode);
1341                         if (rc)
1342                                 break;
1343
1344                         if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
1345                                 struct init_nig_pri_tc_map_req tc_map;
1346
1347                                 OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
1348
1349                                 /* remove this once flow control is
1350                                  * implemented
1351                                  */
1352                                 for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
1353                                         tc_map.pri[j].tc_id = 0;
1354                                         tc_map.pri[j].valid = 1;
1355                                 }
1356                                 ecore_init_nig_pri_tc_map(p_hwfn,
1357                                                           p_hwfn->p_main_ptt,
1358                                                           &tc_map);
1359                         }
1360                         /* fallthrough */
1361                 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1362                         rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
1363                                               p_tunn, p_hwfn->hw_info.hw_mode,
1364                                               b_hw_start, int_mode,
1365                                               allow_npar_tx_switch);
1366                         break;
1367                 default:
1368                         rc = ECORE_NOTIMPL;
1369                         break;
1370                 }
1371
1372                 if (rc != ECORE_SUCCESS)
1373                         DP_NOTICE(p_hwfn, true,
1374                                   "init phase failed loadcode 0x%x (rc %d)\n",
1375                                   load_code, rc);
1376
1377                 /* ACK mfw regardless of success or failure of initialization */
1378                 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1379                                        DRV_MSG_CODE_LOAD_DONE,
1380                                        0, &load_code, &param);
1381                 if (rc != ECORE_SUCCESS)
1382                         return rc;
1383                 if (mfw_rc != ECORE_SUCCESS) {
1384                         DP_NOTICE(p_hwfn, true,
1385                                   "Failed sending LOAD_DONE command\n");
1386                         return mfw_rc;
1387                 }
1388
1389                 p_hwfn->hw_init_done = true;
1390         }
1391
1392         return ECORE_SUCCESS;
1393 }
1394
1395 #define ECORE_HW_STOP_RETRY_LIMIT       (10)
1396 static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
1397                                              struct ecore_hwfn *p_hwfn,
1398                                              struct ecore_ptt *p_ptt)
1399 {
1400         int i;
1401
1402         /* close timers */
1403         ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
1404         ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
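             /* Poll until the timers block reports no active connection/task
              * scans; the wait is skipped entirely while a recovery is in
              * progress.
              */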
1405         for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT &&
1406                                         !p_dev->recov_in_prog; i++) {
1407                 if ((!ecore_rd(p_hwfn, p_ptt,
1408                                TM_REG_PF_SCAN_ACTIVE_CONN)) &&
1409                     (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
1410                         break;
1411
1412                 /* Depending on the number of connections/tasks, a 1ms
1413                  * sleep between polls may be required
1414                  */
1415                 OSAL_MSLEEP(1);
1416         }
1417         if (i == ECORE_HW_STOP_RETRY_LIMIT)
1418                 DP_NOTICE(p_hwfn, true,
1419                           "Timers linear scans are not over"
1420                           " [Connection %02x Tasks %02x]\n",
1421                           (u8)ecore_rd(p_hwfn, p_ptt,
1422                                        TM_REG_PF_SCAN_ACTIVE_CONN),
1423                           (u8)ecore_rd(p_hwfn, p_ptt,
1424                                        TM_REG_PF_SCAN_ACTIVE_TASK));
1425 }
1426
1427 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
1428 {
1429         int j;
1430
1431         for_each_hwfn(p_dev, j) {
1432                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
1433                 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1434
1435                 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
1436         }
1437 }
1438
1439 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
1440 {
1441         enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
1442         int j;
1443
1444         for_each_hwfn(p_dev, j) {
1445                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
1446                 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1447
1448                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
1449
1450                 if (IS_VF(p_dev)) {
1451                         ecore_vf_pf_int_cleanup(p_hwfn);
1452                         continue;
1453                 }
1454
1455                 /* mark the hw as uninitialized... */
1456                 p_hwfn->hw_init_done = false;
1457
1458                 rc = ecore_sp_pf_stop(p_hwfn);
1459                 if (rc)
1460                         DP_NOTICE(p_hwfn, true,
1461                                   "Failed to close PF against FW. Continuing"
1462                                   " to stop HW to prevent illegal host access"
1463                                   " by the device\n");
1464
1465                 /* perform debug action after PF stop was sent */
1466                 OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
1467
1468                 /* close NIG to BRB gate */
1469                 ecore_wr(p_hwfn, p_ptt,
1470                          NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1471
1472                 /* close parser */
1473                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1474                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1475                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1476
1477                 /* @@@TBD - clean transmission queues (5.b) */
1478                 /* @@@TBD - clean BTB (5.c) */
1479
1480                 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
1481
1482                 /* @@@TBD - verify DMAE requests are done (8) */
1483
1484                 /* Disable Attention Generation */
1485                 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1486                 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
1487                 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
1488                 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
1489                 /* Need to wait 1ms to guarantee SBs are cleared */
1490                 OSAL_MSLEEP(1);
1491         }
1492
1493         if (IS_PF(p_dev)) {
1494                 /* Disable DMAE in PXP - in CMT, this should only be done for
1495                  * first hw-function, and only after all transactions have
1496                  * stopped for all active hw-functions.
1497                  */
1498                 t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
1499                                              p_dev->hwfns[0].p_main_ptt, false);
1500                 if (t_rc != ECORE_SUCCESS)
1501                         rc = t_rc;
1502         }
1503
1504         return rc;
1505 }
1506
1507 void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
1508 {
1509         int j;
1510
1511         for_each_hwfn(p_dev, j) {
1512                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
1513                 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1514
1515                 if (IS_VF(p_dev)) {
1516                         ecore_vf_pf_int_cleanup(p_hwfn);
1517                         continue;
1518                 }
1519
1520                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
1521                            "Shutting down the fastpath\n");
1522
1523                 ecore_wr(p_hwfn, p_ptt,
1524                          NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1525
1526                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1527                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1528                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1529
1530                 /* @@@TBD - clean transmission queues (5.b) */
1531                 /* @@@TBD - clean BTB (5.c) */
1532
1533                 /* @@@TBD - verify DMAE requests are done (8) */
1534
1535                 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
1536                 /* Need to wait 1ms to guarantee SBs are cleared */
1537                 OSAL_MSLEEP(1);
1538         }
1539 }
1540
1541 void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
1542 {
1543         struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1544
1545         if (IS_VF(p_hwfn->p_dev))
1546                 return;
1547
1548         /* Re-open incoming traffic */
1549         ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1550                  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
1551 }
1552
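     /* Read a register and fail with a notice if it does not hold the expected
      * value; used by ecore_hw_reset below to verify that the QM usage
      * counters are idle before disabling the PF.
      */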
1553 static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
1554                                              struct ecore_ptt *p_ptt, u32 reg,
1555                                              bool expected)
1556 {
1557         u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
1558
1559         if (assert_val != expected) {
1560                 DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
1561                           reg, expected);
1562                 return ECORE_UNKNOWN_ERROR;
1563         }
1564
1565         return ECORE_SUCCESS;
1566 }
1567
1568 enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
1569 {
1570         enum _ecore_status_t rc = ECORE_SUCCESS;
1571         u32 unload_resp, unload_param;
1572         int i;
1573
1574         for_each_hwfn(p_dev, i) {
1575                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1576
1577                 if (IS_VF(p_dev)) {
1578                         rc = ecore_vf_pf_reset(p_hwfn);
1579                         if (rc)
1580                                 return rc;
1581                         continue;
1582                 }
1583
1584                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
1585
1586                 /* Check for incorrect states */
1587                 if (!p_dev->recov_in_prog) {
1588                         ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1589                                          QM_REG_USG_CNT_PF_TX, 0);
1590                         ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1591                                          QM_REG_USG_CNT_PF_OTHER, 0);
1592                         /* @@@TBD - assert on incorrect xCFC values (10.b) */
1593                 }
1594
1595                 /* Disable PF in HW blocks */
1596                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
1597                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
1598                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1599                          TCFC_REG_STRONG_ENABLE_PF, 0);
1600                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1601                          CCFC_REG_STRONG_ENABLE_PF, 0);
1602
1603                 if (p_dev->recov_in_prog) {
1604                         DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
1605                                    "Recovery is in progress -> skip "
1606                                    "sending unload_req/done\n");
1607                         break;
1608                 }
1609
1610                 /* Send unload command to MCP */
1611                 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1612                                    DRV_MSG_CODE_UNLOAD_REQ,
1613                                    DRV_MB_PARAM_UNLOAD_WOL_MCP,
1614                                    &unload_resp, &unload_param);
1615                 if (rc != ECORE_SUCCESS) {
1616                         DP_NOTICE(p_hwfn, true,
1617                                   "ecore_hw_reset: UNLOAD_REQ failed\n");
1618                         /* @@TBD - what to do? for now, assume ENG. */
1619                         unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
1620                 }
1621
1622                 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1623                                    DRV_MSG_CODE_UNLOAD_DONE,
1624                                    0, &unload_resp, &unload_param);
1625                 if (rc != ECORE_SUCCESS) {
1626                         DP_NOTICE(p_hwfn,
1627                                   true, "ecore_hw_reset: UNLOAD_DONE failed\n");
1628                         /* @@@TBD - Should it really ASSERT here ? */
1629                         return rc;
1630                 }
1631         }
1632
1633         return rc;
1634 }
1635
1636 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
1637 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
1638 {
1639         ecore_ptt_pool_free(p_hwfn);
1640         OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
1641 }
1642
1643 /* Setup bar access */
1644 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
1645 {
1646         /* clear indirect access */
1647         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
1648         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
1649         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
1650         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
1651
1652         /* Clean Previous errors if such exist */
1653         ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1654                  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
1655
1656         /* enable internal target-read */
1657         ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1658                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1659 }
1660
1661 static void get_function_id(struct ecore_hwfn *p_hwfn)
1662 {
1663         /* ME Register */
1664         p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
1665                                                  PXP_PF_ME_OPAQUE_ADDR);
1666
1667         p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1668
1669         /* Bits 16-19 from the ME registers are the pf_num */
1670         /* @@ @TBD - check, may be wrong after B0 implementation for CMT */
1671         p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1672         p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1673                                       PXP_CONCRETE_FID_PFID);
1674         p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1675                                     PXP_CONCRETE_FID_PORT);
1676
1677         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
1678                    "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
1679                    p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
1680 }
1681
1682 static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
1683 {
1684         u32 *feat_num = p_hwfn->hw_info.feat_num;
1685         int num_features = 1;
1686
1687         /* L2 Queues require each: 1 status block. 1 L2 queue */
1688         feat_num[ECORE_PF_L2_QUE] =
1689             OSAL_MIN_T(u32,
1690                        RESC_NUM(p_hwfn, ECORE_SB) / num_features,
1691                        RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1692
1693         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
1694                    "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
1695                    feat_num[ECORE_PF_L2_QUE],
1696                    RESC_NUM(p_hwfn, ECORE_SB), num_features);
1697 }
1698
1699 /* @@@TBD MK RESC: This info is currently hard coded and set as if we were MF;
1700  * need to read it from shmem...
1701  */
1702 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
1703 {
1704         u32 *resc_start = p_hwfn->hw_info.resc_start;
1705         u8 num_funcs = p_hwfn->num_funcs_on_engine;
1706         u32 *resc_num = p_hwfn->hw_info.resc_num;
1707         int i, max_vf_vlan_filters;
1708         struct ecore_sb_cnt_info sb_cnt_info;
1709         bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
1710
1711         OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
1712
1713 #ifdef CONFIG_ECORE_SRIOV
1714         max_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS;
1715 #else
1716         max_vf_vlan_filters = 0;
1717 #endif
1718
1719         ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1720         resc_num[ECORE_SB] = OSAL_MIN_T(u32,
1721                                         (MAX_SB_PER_PATH_BB / num_funcs),
1722                                         sb_cnt_info.sb_cnt);
1723
1724         resc_num[ECORE_L2_QUEUE] = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
1725                                     MAX_NUM_L2_QUEUES_BB) / num_funcs;
1726         resc_num[ECORE_VPORT] = (b_ah ? MAX_NUM_VPORTS_K2 :
1727                                  MAX_NUM_VPORTS_BB) / num_funcs;
1728         resc_num[ECORE_RSS_ENG] = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
1729                                    ETH_RSS_ENGINE_NUM_BB) / num_funcs;
1730         resc_num[ECORE_PQ] = (b_ah ? MAX_QM_TX_QUEUES_K2 :
1731                               MAX_QM_TX_QUEUES_BB) / num_funcs;
1732         resc_num[ECORE_RL] = 8;
1733         resc_num[ECORE_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
1734         resc_num[ECORE_VLAN] = (ETH_NUM_VLAN_FILTERS -
1735                                 max_vf_vlan_filters +
1736                                 1 /*For vlan0 */) / num_funcs;
1737
1738         /* TODO - there will be a problem in AH - there are only 11k lines */
1739         resc_num[ECORE_ILT] = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
1740                                PXP_NUM_ILT_RECORDS_BB) / num_funcs;
1741
1742 #ifndef ASIC_ONLY
1743         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1744                 /* Reduced build contains fewer PQs */
1745                 if (!(p_hwfn->p_dev->b_is_emul_full))
1746                         resc_num[ECORE_PQ] = 32;
1747
1748                 /* For AH emulation, since we have a possible maximal number of
1749                  * 16 enabled PFs, in case there are not enough ILT lines -
1750                  * allocate only first PF as RoCE and have all the other ETH
1751                  * only with less ILT lines.
1752                  */
1753                 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
1754                         resc_num[ECORE_ILT] = resc_num[ECORE_ILT];
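                     /* NOTE: the assignment above is a no-op; presumably a
                      * RoCE-specific ILT increase was stripped from this
                      * Ethernet-only base driver, leaving the placeholder.
                      */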
1755         }
1756 #endif
1757
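             /* Resources are split evenly between PFs: each PF's window starts
              * at rel_pf_id times its per-PF share.
              */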
1758         for (i = 0; i < ECORE_MAX_RESC; i++)
1759                 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1760
1761 #ifndef ASIC_ONLY
1762         /* Correct the common ILT calculation if PF0 has more */
1763         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
1764             p_hwfn->p_dev->b_is_emul_full &&
1765             p_hwfn->rel_pf_id && resc_num[ECORE_ILT])
1766                 resc_start[ECORE_ILT] += resc_num[ECORE_ILT];
1767 #endif
1768
1769         /* Sanity for ILT */
1770         if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
1771             (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
1772                 DP_NOTICE(p_hwfn, true,
1773                           "Can't assign ILT pages [%08x,...,%08x]\n",
1774                           RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
1775                                                                   ECORE_ILT) -
1776                           1);
1777                 return ECORE_INVAL;
1778         }
1779
1780         ecore_hw_set_feat(p_hwfn);
1781
1782         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
1783                    "The numbers for each resource are:\n"
1784                    "SB = %d start = %d\n"
1785                    "L2_QUEUE = %d start = %d\n"
1786                    "VPORT = %d start = %d\n"
1787                    "PQ = %d start = %d\n"
1788                    "RL = %d start = %d\n"
1789                    "MAC = %d start = %d\n"
1790                    "VLAN = %d start = %d\n"
1791                    "ILT = %d start = %d\n"
1792                    "CMDQS_CQS = %d start = %d\n",
1793                    RESC_NUM(p_hwfn, ECORE_SB), RESC_START(p_hwfn, ECORE_SB),
1794                    RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
1795                    RESC_START(p_hwfn, ECORE_L2_QUEUE),
1796                    RESC_NUM(p_hwfn, ECORE_VPORT),
1797                    RESC_START(p_hwfn, ECORE_VPORT),
1798                    RESC_NUM(p_hwfn, ECORE_PQ), RESC_START(p_hwfn, ECORE_PQ),
1799                    RESC_NUM(p_hwfn, ECORE_RL), RESC_START(p_hwfn, ECORE_RL),
1800                    RESC_NUM(p_hwfn, ECORE_MAC), RESC_START(p_hwfn, ECORE_MAC),
1801                    RESC_NUM(p_hwfn, ECORE_VLAN),
1802                    RESC_START(p_hwfn, ECORE_VLAN),
1803                    RESC_NUM(p_hwfn, ECORE_ILT), RESC_START(p_hwfn, ECORE_ILT),
1804                    RESC_NUM(p_hwfn, ECORE_CMDQS_CQS),
1805                    RESC_START(p_hwfn, ECORE_CMDQS_CQS));
1806
1807         return ECORE_SUCCESS;
1808 }
1809
1810 static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
1811                                                   struct ecore_ptt *p_ptt)
1812 {
1813         u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1814         u32 port_cfg_addr, link_temp, device_capabilities;
1815         struct ecore_mcp_link_params *link;
1816
1817         /* Read global nvm_cfg address */
1818         u32 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1819
1820         /* Verify MCP has initialized it */
1821         if (nvm_cfg_addr == 0) {
1822                 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
1823                 return ECORE_INVAL;
1824         }
1825
1826         /* Read nvm_cfg1 (Note: this is just the offset, not the offsize (TBD)) */
1827         nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1828
1829         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1830             OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
1831                                                        core_cfg);
1832
1833         core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
1834
1835         switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1836                 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1837         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1838                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
1839                 break;
1840         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1841                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
1842                 break;
1843         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1844                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
1845                 break;
1846         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1847                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
1848                 break;
1849         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1850                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
1851                 break;
1852         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1853                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
1854                 break;
1855         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1856                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
1857                 break;
1858         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1859                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
1860                 break;
1861         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1862                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
1863                 break;
1864         default:
1865                 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
1866                           core_cfg);
1867                 break;
1868         }
1869
1870         /* Read default link configuration */
1871         link = &p_hwfn->mcp_info->link_input;
1872         port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1873             OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1874         link_temp = ecore_rd(p_hwfn, p_ptt,
1875                              port_cfg_addr +
1876                              OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
1877         link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1878         link->speed.advertised_speeds = link_temp;
1879
1880         link_temp = link->speed.advertised_speeds;
1881         p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
1882
1883         link_temp = ecore_rd(p_hwfn, p_ptt,
1884                              port_cfg_addr +
1885                              OFFSETOF(struct nvm_cfg1_port, link_settings));
1886         switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1887                 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1888         case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1889                 link->speed.autoneg = true;
1890                 break;
1891         case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1892                 link->speed.forced_speed = 1000;
1893                 break;
1894         case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1895                 link->speed.forced_speed = 10000;
1896                 break;
1897         case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1898                 link->speed.forced_speed = 25000;
1899                 break;
1900         case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1901                 link->speed.forced_speed = 40000;
1902                 break;
1903         case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1904                 link->speed.forced_speed = 50000;
1905                 break;
1906         case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1907                 link->speed.forced_speed = 100000;
1908                 break;
1909         default:
1910                 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
1911         }
1912
1913         link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1914         link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1915         link->pause.autoneg = !!(link_temp &
1916                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1917         link->pause.forced_rx = !!(link_temp &
1918                                     NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1919         link->pause.forced_tx = !!(link_temp &
1920                                     NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1921         link->loopback_mode = 0;
1922
1923         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1924                    "Read default link: Speed 0x%08x, Adv. Speed 0x%08x,"
1925                    " AN: 0x%02x, PAUSE AN: 0x%02x\n",
1926                    link->speed.forced_speed, link->speed.advertised_speeds,
1927                    link->speed.autoneg, link->pause.autoneg);
1928
1929         /* Read Multi-function information from shmem */
1930         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1931             OFFSETOF(struct nvm_cfg1, glob) +
1932             OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
1933
1934         generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
1935
1936         mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1937             NVM_CFG1_GLOB_MF_MODE_OFFSET;
1938
1939         switch (mf_mode) {
1940         case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1941                 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
1942                 break;
1943         case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1944                 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
1945                 break;
1946         case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1947                 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
1948                 break;
1949         }
1950         DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1951                 p_hwfn->p_dev->mf_mode);
1952
1953         /* Read device capabilities information from shmem */
1954         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1955             OFFSETOF(struct nvm_cfg1, glob) +
1956             OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
1957
1958         device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
1959         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1960                 OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
1961                              &p_hwfn->hw_info.device_capabilities);
1962
1963         return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1964 }
1965
1966 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
1967                                 struct ecore_ptt *p_ptt)
1968 {
1969         u8 num_funcs;
1970         u32 tmp, mask;
1971
1972         num_funcs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_PFS_K2
1973             : MAX_NUM_PFS_BB;
1974
1975         /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
1976          * in the other bits are selected.
1977          * Bits 1-15 are for functions 1-15, respectively, and their value is
1978          * '0' only for enabled functions (function 0 always exists and is
1979          * enabled).
1980          * In case of CMT, only the "even" functions are enabled, and thus the
1981          * number of functions for both hwfns is learnt from the same bits.
1982          */
1983
1984         tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
1985         if (tmp & 0x1) {
1986                 if (ECORE_PATH_ID(p_hwfn) && p_hwfn->p_dev->num_hwfns == 1) {
1987                         num_funcs = 0;
1988                         mask = 0xaaaa;
1989                 } else {
1990                         num_funcs = 1;
1991                         mask = 0x5554;
1992                 }
1993
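                     /* Invert the hide bits so that '1' marks an enabled
                      * function, mask to the functions relevant for this path,
                      * and count them.
                      */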
1994                 tmp = (tmp ^ 0xffffffff) & mask;
1995                 while (tmp) {
1996                         if (tmp & 0x1)
1997                                 num_funcs++;
1998                         tmp >>= 0x1;
1999                 }
2000         }
2001
2002         p_hwfn->num_funcs_on_engine = num_funcs;
2003
2004 #ifndef ASIC_ONLY
2005         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2006                 DP_NOTICE(p_hwfn, false,
2007                           "FPGA: Limit number of PFs to 4 [would affect"
2008                           " resource allocation, needed for IOV]\n");
2009                 p_hwfn->num_funcs_on_engine = 4;
2010         }
2011 #endif
2012
2013         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "num_funcs_on_engine = %d\n",
2014                    p_hwfn->num_funcs_on_engine);
2015 }
2016
2017 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
2018                                       struct ecore_ptt *p_ptt)
2019 {
2020         u32 port_mode;
2021
2022 #ifndef ASIC_ONLY
2023         /* Read the port mode */
2024         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
2025                 port_mode = 4;
2026         else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
2027                  (p_hwfn->p_dev->num_hwfns > 1))
2028                 /* In CMT on emulation, assume 1 port */
2029                 port_mode = 1;
2030         else
2031 #endif
2032                 port_mode = ecore_rd(p_hwfn, p_ptt,
2033                                      CNIG_REG_NW_PORT_MODE_BB_B0);
2034
2035         if (port_mode < 3) {
2036                 p_hwfn->p_dev->num_ports_in_engines = 1;
2037         } else if (port_mode <= 5) {
2038                 p_hwfn->p_dev->num_ports_in_engines = 2;
2039         } else {
2040                 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
2041                           port_mode);
2042
2043                 /* Default num_ports_in_engines to something */
2044                 p_hwfn->p_dev->num_ports_in_engines = 1;
2045         }
2046 }
2047
2048 static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
2049                                       struct ecore_ptt *p_ptt)
2050 {
2051         u32 port;
2052         int i;
2053
2054         p_hwfn->p_dev->num_ports_in_engines = 0;
2055
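             /* Each port has its own NIG config register; bit 0 appears to
              * flag an enabled port (inferred from the check below).
              */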
2056         for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
2057                 port = ecore_rd(p_hwfn, p_ptt,
2058                                 CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
2059                 if (port & 1)
2060                         p_hwfn->p_dev->num_ports_in_engines++;
2061         }
2062 }
2063
2064 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
2065                                    struct ecore_ptt *p_ptt)
2066 {
2067         if (ECORE_IS_BB(p_hwfn->p_dev))
2068                 ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
2069         else
2070                 ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
2071 }
2072
2073 static enum _ecore_status_t
2074 ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
2075                   struct ecore_ptt *p_ptt,
2076                   enum ecore_pci_personality personality)
2077 {
2078         enum _ecore_status_t rc;
2079
2080         rc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt);
2081         if (rc)
2082                 return rc;
2083
2084         /* TODO In get_hw_info, amongst others:
2085          * Get MCP FW revision and determine according to it the supported
2086          * features (e.g. DCB)
2087          * Get boot mode
2088          * ecore_get_pcie_width_speed, WOL capability.
2089          * Number of global CQ-s (for storage)
2090          */
2091         ecore_hw_info_port_num(p_hwfn, p_ptt);
2092
2093 #ifndef ASIC_ONLY
2094         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
2095 #endif
2096                 ecore_hw_get_nvm_info(p_hwfn, p_ptt);
2097
2098         rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
2099         if (rc)
2100                 return rc;
2101
2102 #ifndef ASIC_ONLY
2103         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
2104 #endif
2105                 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
2106                             p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
2107 #ifndef ASIC_ONLY
2108         } else {
2109                 static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
2110
2111                 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
2112                 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
2113         }
2114 #endif
2115
2116         if (ecore_mcp_is_init(p_hwfn)) {
2117                 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
2118                         p_hwfn->hw_info.ovlan =
2119                             p_hwfn->mcp_info->func_info.ovlan;
2120
2121                 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
2122         }
2123
2124         if (personality != ECORE_PCI_DEFAULT)
2125                 p_hwfn->hw_info.personality = personality;
2126         else if (ecore_mcp_is_init(p_hwfn))
2127                 p_hwfn->hw_info.personality =
2128                     p_hwfn->mcp_info->func_info.protocol;
2129
2130 #ifndef ASIC_ONLY
2131         /* To overcome the ILT shortage on emulation, at least until we
2132          * have a definite answer from the system, allow only PF0 to be RoCE.
2133          */
2134         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
2135                 p_hwfn->hw_info.personality = ECORE_PCI_ETH;
2136 #endif
2137
2138         ecore_get_num_funcs(p_hwfn, p_ptt);
2139
2140         /* Feat num is dependent on personality and on the number of functions
2141          * on the engine. Therefore it should be come after personality
2142          * initialization and after getting the number of functions.
2143          */
2144         return ecore_hw_get_resc(p_hwfn);
2145 }
2146
2147 /* @TMP - this should move to a proper .h */
2148 #define CHIP_NUM_AH                     0x8070
2149
2150 static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
2151 {
2152         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2153         u32 tmp;
2154
2155         /* Read Vendor Id / Device Id */
2156         OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
2157                                   &p_dev->vendor_id);
2158         OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
2159                                   &p_dev->device_id);
2160
2161         p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2162                                          MISCS_REG_CHIP_NUM);
2163         p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2164                                         MISCS_REG_CHIP_REV);
2165
2166         MASK_FIELD(CHIP_REV, p_dev->chip_rev);
2167
2168         /* Determine type */
2169         if (p_dev->device_id == CHIP_NUM_AH)
2170                 p_dev->type = ECORE_DEV_TYPE_AH;
2171         else
2172                 p_dev->type = ECORE_DEV_TYPE_BB;
2173
2174         /* Learn number of HW-functions */
2175         tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2176                        MISCS_REG_CMT_ENABLED_FOR_PAIR);
2177
2178         if (tmp & (1 << p_hwfn->rel_pf_id)) {
2179                 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
2180                 p_dev->num_hwfns = 2;
2181         } else {
2182                 p_dev->num_hwfns = 1;
2183         }
2184
2185 #ifndef ASIC_ONLY
2186         if (CHIP_REV_IS_EMUL(p_dev)) {
2187                 /* For some reason we have problems with this register
2188                  * in B0 emulation; Simply assume no CMT
2189                  */
2190                 DP_NOTICE(p_dev->hwfns, false,
2191                           "device on emul - assume no CMT\n");
2192                 p_dev->num_hwfns = 1;
2193         }
2194 #endif
2195
2196         p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2197                                        MISCS_REG_CHIP_TEST_REG) >> 4;
2198         MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
2199         p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2200                                           MISCS_REG_CHIP_METAL);
2201         MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
2202         DP_INFO(p_dev->hwfns,
2203                 "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x"
2204                 " Metal: %04x\n",
2205                 ECORE_IS_BB(p_dev) ? "BB" : "AH",
2206                 CHIP_REV_IS_A0(p_dev) ? 0 : 1,
2207                 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
2208                 p_dev->chip_metal);
2209
2210         if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
2211                 DP_NOTICE(p_dev->hwfns, false,
2212                           "The chip type/rev (BB A0) is not supported!\n");
2213                 return ECORE_ABORTED;
2214         }
2215 #ifndef ASIC_ONLY
2216         if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
2217                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2218                          MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
2219
2220         if (CHIP_REV_IS_EMUL(p_dev)) {
2221                 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2222                                MISCS_REG_ECO_RESERVED);
2223                 if (tmp & (1 << 29)) {
2224                         DP_NOTICE(p_hwfn, false,
2225                                   "Emulation: Running on a FULL build\n");
2226                         p_dev->b_is_emul_full = true;
2227                 } else {
2228                         DP_NOTICE(p_hwfn, false,
2229                                   "Emulation: Running on a REDUCED build\n");
2230                 }
2231         }
2232 #endif
2233
2234         return ECORE_SUCCESS;
2235 }
2236
2237 void ecore_prepare_hibernate(struct ecore_dev *p_dev)
2238 {
2239         int j;
2240
2241         if (IS_VF(p_dev))
2242                 return;
2243
2244         for_each_hwfn(p_dev, j) {
2245                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
2246
2247                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
2248                            "Mark hw/fw uninitialized\n");
2249
2250                 p_hwfn->hw_init_done = false;
2251                 p_hwfn->first_on_engine = false;
2252         }
2253 }
2254
2255 static enum _ecore_status_t
2256 ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
2257                         void OSAL_IOMEM *p_regview,
2258                         void OSAL_IOMEM *p_doorbells,
2259                         enum ecore_pci_personality personality)
2260 {
2261         enum _ecore_status_t rc = ECORE_SUCCESS;
2262
2263         /* Split PCI bars evenly between hwfns */
2264         p_hwfn->regview = p_regview;
2265         p_hwfn->doorbells = p_doorbells;
2266
2267         /* Validate that chip access is feasible */
2268         if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
2269                 DP_ERR(p_hwfn,
2270                        "Reading the ME register returns all Fs;"
2271                        " Preventing further chip access\n");
2272                 return ECORE_INVAL;
2273         }
2274
2275         get_function_id(p_hwfn);
2276
2277         /* Allocate PTT pool */
2278         rc = ecore_ptt_pool_alloc(p_hwfn);
2279         if (rc) {
2280                 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
2281                 goto err0;
2282         }
2283
2284         /* Allocate the main PTT */
2285         p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
2286
2287         /* First hwfn learns basic information, e.g., number of hwfns */
2288         if (!p_hwfn->my_id) {
2289                 rc = ecore_get_dev_info(p_hwfn->p_dev);
2290                 if (rc != ECORE_SUCCESS)
2291                         goto err1;
2292         }
2293
2294         ecore_hw_hwfn_prepare(p_hwfn);
2295
2296         /* Initialize MCP structure */
2297         rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
2298         if (rc) {
2299                 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
2300                 goto err1;
2301         }
2302
2303         /* Read the device configuration information from the HW and SHMEM */
2304         rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
2305         if (rc) {
2306                 DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
2307                 goto err2;
2308         }
2309
2310         /* Allocate the init RT array and initialize the init-ops engine */
2311         rc = ecore_init_alloc(p_hwfn);
2312         if (rc) {
2313                 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
2314                 goto err2;
2315         }
2316 #ifndef ASIC_ONLY
2317         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2318                 DP_NOTICE(p_hwfn, false,
2319                           "FPGA: workaround; Prevent DMAE parities\n");
2320                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
2321
2322                 DP_NOTICE(p_hwfn, false,
2323                           "FPGA: workaround: Set VF bar0 size\n");
2324                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2325                          PGLUE_B_REG_VF_BAR0_SIZE, 4);
2326         }
2327 #endif
2328
2329         return rc;
2330 err2:
2331         ecore_mcp_free(p_hwfn);
2332 err1:
2333         ecore_hw_hwfn_free(p_hwfn);
2334 err0:
2335         return rc;
2336 }
2337
2338 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
2339 {
2340         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2341         enum _ecore_status_t rc;
2342
2343         if (IS_VF(p_dev))
2344                 return ecore_vf_hw_prepare(p_dev);
2345
2346         /* Store the precompiled init data ptrs */
2347         ecore_init_iro_array(p_dev);
2348
2349         /* Initialize the first hwfn - will learn number of hwfns */
2350         rc = ecore_hw_prepare_single(p_hwfn,
2351                                      p_dev->regview,
2352                                      p_dev->doorbells, personality);
2353         if (rc != ECORE_SUCCESS)
2354                 return rc;
2355
2356         personality = p_hwfn->hw_info.personality;
2357
2358         /* Initialize the 2nd hwfn if necessary */
2359         if (p_dev->num_hwfns > 1) {
2360                 void OSAL_IOMEM *p_regview, *p_doorbell;
2361                 u8 OSAL_IOMEM *addr;
2362
2363                 /* adjust bar offset for second engine */
2364                 addr = (u8 OSAL_IOMEM *)p_dev->regview +
2365                     ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
2366                 p_regview = (void OSAL_IOMEM *)addr;
2367
2368                 addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
2369                     ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
2370                 p_doorbell = (void OSAL_IOMEM *)addr;
2371
2372                 /* prepare second hw function */
2373                 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
2374                                              p_doorbell, personality);
2375
2376                 /* in case of error, need to free the previously
2377                  * initialized hwfn 0
2378                  */
2379                 if (rc != ECORE_SUCCESS) {
2380                         ecore_init_free(p_hwfn);
2381                         ecore_mcp_free(p_hwfn);
2382                         ecore_hw_hwfn_free(p_hwfn);
2383                         return rc;
2384                 }
2385         }
2386
2387         return ECORE_SUCCESS;
2388 }
2389
2390 void ecore_hw_remove(struct ecore_dev *p_dev)
2391 {
2392         int i;
2393
2394         for_each_hwfn(p_dev, i) {
2395                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2396
2397                 if (IS_VF(p_dev)) {
2398                         ecore_vf_pf_release(p_hwfn);
2399                         continue;
2400                 }
2401
2402                 ecore_init_free(p_hwfn);
2403                 ecore_hw_hwfn_free(p_hwfn);
2404                 ecore_mcp_free(p_hwfn);
2405
2406                 OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
2407         }
2408 }
2409
2410 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
2411                                       struct ecore_chain *p_chain)
2412 {
2413         void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
2414         dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
2415         struct ecore_chain_next *p_next;
2416         u32 size, i;
2417
2418         if (!p_virt)
2419                 return;
2420
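             /* The next-ptr element sits right after the usable elements of
              * each page; 'size' is its offset within a page.
              */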
2421         size = p_chain->elem_size * p_chain->usable_per_page;
2422
2423         for (i = 0; i < p_chain->page_cnt; i++) {
2424                 if (!p_virt)
2425                         break;
2426
2427                 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
2428                 p_virt_next = p_next->next_virt;
2429                 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
2430
2431                 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
2432                                        ECORE_CHAIN_PAGE_SIZE);
2433
2434                 p_virt = p_virt_next;
2435                 p_phys = p_phys_next;
2436         }
2437 }
2438
2439 static void ecore_chain_free_single(struct ecore_dev *p_dev,
2440                                     struct ecore_chain *p_chain)
2441 {
2442         if (!p_chain->p_virt_addr)
2443                 return;
2444
2445         OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
2446                                p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
2447 }
2448
2449 static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
2450                                  struct ecore_chain *p_chain)
2451 {
2452         void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
2453         u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
2454         u32 page_cnt = p_chain->page_cnt, i, pbl_size;
2455
2456         if (!pp_virt_addr_tbl)
2457                 return;
2458
2459         if (!p_chain->pbl.p_virt_table)
2460                 goto out;
2461
2462         for (i = 0; i < page_cnt; i++) {
2463                 if (!pp_virt_addr_tbl[i])
2464                         break;
2465
2466                 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
2467                                        *(dma_addr_t *)p_pbl_virt,
2468                                        ECORE_CHAIN_PAGE_SIZE);
2469
2470                 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
2471         }
2472
2473         pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
2474         OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
2475                                p_chain->pbl.p_phys_table, pbl_size);
2476 out:
2477         OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
2478 }
2479
2480 void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
2481 {
2482         switch (p_chain->mode) {
2483         case ECORE_CHAIN_MODE_NEXT_PTR:
2484                 ecore_chain_free_next_ptr(p_dev, p_chain);
2485                 break;
2486         case ECORE_CHAIN_MODE_SINGLE:
2487                 ecore_chain_free_single(p_dev, p_chain);
2488                 break;
2489         case ECORE_CHAIN_MODE_PBL:
2490                 ecore_chain_free_pbl(p_dev, p_chain);
2491                 break;
2492         }
2493 }
2494
2495 static enum _ecore_status_t
2496 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
2497                                enum ecore_chain_cnt_type cnt_type,
2498                                osal_size_t elem_size, u32 page_cnt)
2499 {
2500         u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
2501
2502         /* The actual chain size can be larger than the maximal possible value
2503          * after rounding up the requested elements number to pages, and after
2504          * taking into account the unusable elements (next-ptr elements).
2505          * The size of a "u16" chain can be (U16_MAX + 1) since the chain
2506          * size/capacity fields are of a u32 type.
2507          */
2508         if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
2509              chain_size > ((u32)ECORE_U16_MAX + 1)) ||
2510             (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
2511              chain_size > ECORE_U32_MAX)) {
2512                 DP_NOTICE(p_dev, true,
2513                           "The actual chain size (0x%lx) is larger than"
2514                           " the maximal possible value\n",
2515                           (unsigned long)chain_size);
2516                 return ECORE_INVAL;
2517         }
2518
2519         return ECORE_SUCCESS;
2520 }
2521
2522 static enum _ecore_status_t
2523 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
2524 {
2525         void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
2526         dma_addr_t p_phys = 0;
2527         u32 i;
2528
2529         for (i = 0; i < p_chain->page_cnt; i++) {
2530                 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
2531                                                  ECORE_CHAIN_PAGE_SIZE);
2532                 if (!p_virt) {
2533                         DP_NOTICE(p_dev, true,
2534                                   "Failed to allocate chain memory\n");
2535                         return ECORE_NOMEM;
2536                 }
2537
2538                 if (i == 0) {
2539                         ecore_chain_init_mem(p_chain, p_virt, p_phys);
2540                         ecore_chain_reset(p_chain);
2541                 } else {
2542                         ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2543                                                        p_virt, p_phys);
2544                 }
2545
2546                 p_virt_prev = p_virt;
2547         }
2548         /* Last page's next element should point to the beginning of the
2549          * chain.
2550          */
2551         ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2552                                        p_chain->p_virt_addr,
2553                                        p_chain->p_phys_addr);
2554
2555         return ECORE_SUCCESS;
2556 }
2557
2558 static enum _ecore_status_t
2559 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
2560 {
2561         void *p_virt = OSAL_NULL;
2562         dma_addr_t p_phys = 0;
2563
2564         p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
2565         if (!p_virt) {
2566                 DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
2567                 return ECORE_NOMEM;
2568         }
2569
2570         ecore_chain_init_mem(p_chain, p_virt, p_phys);
2571         ecore_chain_reset(p_chain);
2572
2573         return ECORE_SUCCESS;
2574 }
2575
2576 static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
2577                                                   struct ecore_chain *p_chain)
2578 {
2579         void *p_virt = OSAL_NULL;
2580         u8 *p_pbl_virt = OSAL_NULL;
2581         void **pp_virt_addr_tbl = OSAL_NULL;
2582         dma_addr_t p_phys = 0, p_pbl_phys = 0;
2583         u32 page_cnt = p_chain->page_cnt, size, i;
2584
2585         size = page_cnt * sizeof(*pp_virt_addr_tbl);
2586         pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
2587         if (!pp_virt_addr_tbl) {
2588                 DP_NOTICE(p_dev, true,
2589                           "Failed to allocate memory for the chain"
2590                           " virtual addresses table\n");
2591                 return ECORE_NOMEM;
2592         }
2593         OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
2594
2595         /* The allocation of the PBL table is done with its full size, since it
2596          * is expected to be contiguous.
2597          */
2598         size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
2599         p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
2600         if (!p_pbl_virt) {
2601                 DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
2602                 return ECORE_NOMEM;
2603         }
2604
2605         ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
2606                                  pp_virt_addr_tbl);
2607
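             /* Allocate the chain pages one by one, filling the PBL with each
              * page's physical address and the shadow table with its virtual
              * address.
              */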
2608         for (i = 0; i < page_cnt; i++) {
2609                 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
2610                                                  ECORE_CHAIN_PAGE_SIZE);
2611                 if (!p_virt) {
2612                         DP_NOTICE(p_dev, true,
2613                                   "Failed to allocate chain memory\n");
2614                         return ECORE_NOMEM;
2615                 }
2616
2617                 if (i == 0) {
2618                         ecore_chain_init_mem(p_chain, p_virt, p_phys);
2619                         ecore_chain_reset(p_chain);
2620                 }
2621
2622                 /* Fill the PBL table with the physical address of the page */
2623                 *(dma_addr_t *)p_pbl_virt = p_phys;
2624                 /* Keep the virtual address of the page */
2625                 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
2626
2627                 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
2628         }
2629
2630         return ECORE_SUCCESS;
2631 }
2632
2633 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
2634                                        enum ecore_chain_use_mode intended_use,
2635                                        enum ecore_chain_mode mode,
2636                                        enum ecore_chain_cnt_type cnt_type,
2637                                        u32 num_elems, osal_size_t elem_size,
2638                                        struct ecore_chain *p_chain)
2639 {
2640         u32 page_cnt;
2641         enum _ecore_status_t rc = ECORE_SUCCESS;
2642
2643         if (mode == ECORE_CHAIN_MODE_SINGLE)
2644                 page_cnt = 1;
2645         else
2646                 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
2647
2648         rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
2649                                             page_cnt);
2650         if (rc) {
2651                 DP_NOTICE(p_dev, true,
2652                           "Cannot allocate a chain with the given arguments:\n"
2653                           " [use_mode %d, mode %d, cnt_type %d, num_elems %d,"
2654                           " elem_size %zu]\n",
2655                           intended_use, mode, cnt_type, num_elems, elem_size);
2656                 return rc;
2657         }
2658
2659         ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
2660                                 mode, cnt_type);
2661
2662         switch (mode) {
2663         case ECORE_CHAIN_MODE_NEXT_PTR:
2664                 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
2665                 break;
2666         case ECORE_CHAIN_MODE_SINGLE:
2667                 rc = ecore_chain_alloc_single(p_dev, p_chain);
2668                 break;
2669         case ECORE_CHAIN_MODE_PBL:
2670                 rc = ecore_chain_alloc_pbl(p_dev, p_chain);
2671                 break;
2672         }
2673         if (rc)
2674                 goto nomem;
2675
2676         return ECORE_SUCCESS;
2677
2678 nomem:
2679         ecore_chain_free(p_dev, p_chain);
2680         return rc;
2681 }
2682
2683 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
2684                                        u16 src_id, u16 *dst_id)
2685 {
2686         if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
2687                 u16 min, max;
2688
2689                 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
2690                 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 1;
2691                 DP_NOTICE(p_hwfn, true,
2692                           "l2_queue id [%d] is not valid, available"
2693                           " indices [%d - %d]\n",
2694                           src_id, min, max);
2695
2696                 return ECORE_INVAL;
2697         }
2698
2699         *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
2700
2701         return ECORE_SUCCESS;
2702 }
2703
2704 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
2705                                     u8 src_id, u8 *dst_id)
2706 {
2707         if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
2708                 u8 min, max;
2709
2710                 min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
2711                 max = min + RESC_NUM(p_hwfn, ECORE_VPORT) - 1;
2712                 DP_NOTICE(p_hwfn, true,
2713                           "vport id [%d] is not valid, available"
2714                           " indices [%d - %d]\n",
2715                           src_id, min, max);
2716
2717                 return ECORE_INVAL;
2718         }
2719
2720         *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
2721
2722         return ECORE_SUCCESS;
2723 }
2724
2725 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
2726                                       u8 src_id, u8 *dst_id)
2727 {
2728         if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
2729                 u8 min, max;
2730
2731                 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
2732                 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG) - 1;
2733                 DP_NOTICE(p_hwfn, true,
2734                           "rss_eng id [%d] is not valid, avail idx [%d - %d]\n",
2735                           src_id, min, max);
2736
2737                 return ECORE_INVAL;
2738         }
2739
2740         *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
2741
2742         return ECORE_SUCCESS;
2743 }
2744
2745 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
2746                                               struct ecore_ptt *p_ptt,
2747                                               u8 *p_filter)
2748 {
2749         u32 high, low, en;
2750         int i;
2751
2752         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2753                 return ECORE_SUCCESS;
2754
2755         high = p_filter[1] | (p_filter[0] << 8);
2756         low = p_filter[5] | (p_filter[4] << 8) |
2757             (p_filter[3] << 16) | (p_filter[2] << 24);
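        /* Register layout used below: each filter entry i has one u32 for
         * each of EN/MODE/PROTOCOL_TYPE, while its 64-bit VALUE is split
         * across two u32 slots: 2 * i holds the low word and 2 * i + 1
         * the high word.
         */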
2758
2759         /* Find a free entry and utilize it */
2760         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2761                 en = ecore_rd(p_hwfn, p_ptt,
2762                               NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
2763                 if (en)
2764                         continue;
2765                 ecore_wr(p_hwfn, p_ptt,
2766                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2767                          2 * i * sizeof(u32), low);
2768                 ecore_wr(p_hwfn, p_ptt,
2769                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2770                          (2 * i + 1) * sizeof(u32), high);
2771                 ecore_wr(p_hwfn, p_ptt,
2772                          NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
2773                 ecore_wr(p_hwfn, p_ptt,
2774                          NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2775                          i * sizeof(u32), 0);
2776                 ecore_wr(p_hwfn, p_ptt,
2777                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
2778                 break;
2779         }
2780         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
2781                 DP_NOTICE(p_hwfn, false,
2782                           "Failed to find an empty LLH filter to utilize\n");
2783                 return ECORE_INVAL;
2784         }
2785
2786         DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
2787                    "MAC: %02x:%02x:%02x:%02x:%02x:%02x is added at %d\n",
2788                    p_filter[0], p_filter[1], p_filter[2],
2789                    p_filter[3], p_filter[4], p_filter[5], i);
2790
2791         return ECORE_SUCCESS;
2792 }
2793
2794 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
2795                                  struct ecore_ptt *p_ptt, u8 *p_filter)
2796 {
2797         u32 high, low;
2798         int i;
2799
2800         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2801                 return;
2802
2803         high = p_filter[1] | (p_filter[0] << 8);
2804         low = p_filter[5] | (p_filter[4] << 8) |
2805             (p_filter[3] << 16) | (p_filter[2] << 24);
2806
2807         /* Find the entry and clean it */
2808         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2809                 if (ecore_rd(p_hwfn, p_ptt,
2810                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2811                              2 * i * sizeof(u32)) != low)
2812                         continue;
2813                 if (ecore_rd(p_hwfn, p_ptt,
2814                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2815                              (2 * i + 1) * sizeof(u32)) != high)
2816                         continue;
2817
2818                 ecore_wr(p_hwfn, p_ptt,
2819                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2820                 ecore_wr(p_hwfn, p_ptt,
2821                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2822                          2 * i * sizeof(u32), 0);
2823                 ecore_wr(p_hwfn, p_ptt,
2824                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2825                          (2 * i + 1) * sizeof(u32), 0);
2826                 break;
2827         }
2828         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
2829                 DP_NOTICE(p_hwfn, false,
2830                           "Tried to remove a non-configured filter\n");
2831 }
2832
2833 enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
2834                                                     struct ecore_ptt *p_ptt,
2835                                                     u16 filter)
2836 {
2837         u32 high, low, en;
2838         int i;
2839
2840         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2841                 return ECORE_SUCCESS;
2842
2843         high = filter;
2844         low = 0;
2845
2846         /* Find a free entry and utilize it */
2847         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2848                 en = ecore_rd(p_hwfn, p_ptt,
2849                               NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
2850                 if (en)
2851                         continue;
2852                 ecore_wr(p_hwfn, p_ptt,
2853                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2854                          2 * i * sizeof(u32), low);
2855                 ecore_wr(p_hwfn, p_ptt,
2856                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2857                          (2 * i + 1) * sizeof(u32), high);
2858                 ecore_wr(p_hwfn, p_ptt,
2859                          NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
2860                 ecore_wr(p_hwfn, p_ptt,
2861                          NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2862                          i * sizeof(u32), 1);
2863                 ecore_wr(p_hwfn, p_ptt,
2864                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
2865                 break;
2866         }
2867         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
2868                 DP_NOTICE(p_hwfn, false,
2869                           "Failed to find an empty LLH filter to utilize\n");
2870                 return ECORE_INVAL;
2871         }
2872
2873         DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
2874                    "ETH type: %04x is added at %d\n", filter, i);
2875
2876         return ECORE_SUCCESS;
2877 }
2878
2879 void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
2880                                        struct ecore_ptt *p_ptt, u16 filter)
2881 {
2882         u32 high, low;
2883         int i;
2884
2885         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2886                 return;
2887
2888         high = filter;
2889         low = 0;
2890
2891         /* Find the entry and clean it */
2892         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2893                 if (ecore_rd(p_hwfn, p_ptt,
2894                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2895                              2 * i * sizeof(u32)) != low)
2896                         continue;
2897                 if (ecore_rd(p_hwfn, p_ptt,
2898                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2899                              (2 * i + 1) * sizeof(u32)) != high)
2900                         continue;
2901
2902                 ecore_wr(p_hwfn, p_ptt,
2903                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2904                 ecore_wr(p_hwfn, p_ptt,
2905                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2906                          2 * i * sizeof(u32), 0);
2907                 ecore_wr(p_hwfn, p_ptt,
2908                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2909                          (2 * i + 1) * sizeof(u32), 0);
2910                 break;
2911         }
2912         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
2913                 DP_NOTICE(p_hwfn, false,
2914                           "Tried to remove a non-configured filter\n");
2915 }
2916
2917 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
2918                                  struct ecore_ptt *p_ptt)
2919 {
2920         int i;
2921
2922         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2923                 return;
2924
2925         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2926                 ecore_wr(p_hwfn, p_ptt,
2927                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2928                 ecore_wr(p_hwfn, p_ptt,
2929                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2930                          2 * i * sizeof(u32), 0);
2931                 ecore_wr(p_hwfn, p_ptt,
2932                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2933                          (2 * i + 1) * sizeof(u32), 0);
2934         }
2935 }
2936
2937 enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
2938                                           struct ecore_ptt *p_ptt)
2939 {
2940         u32 reg_tbl[] = {
2941                 BRB_REG_HEADER_SIZE,
2942                 BTB_REG_HEADER_SIZE,
2943                 CAU_REG_LONG_TIMEOUT_THRESHOLD,
2944                 CCFC_REG_ACTIVITY_COUNTER,
2945                 CDU_REG_CID_ADDR_PARAMS,
2946                 DBG_REG_CLIENT_ENABLE,
2947                 DMAE_REG_INIT,
2948                 DORQ_REG_IFEN,
2949                 GRC_REG_TIMEOUT_EN,
2950                 IGU_REG_BLOCK_CONFIGURATION,
2951                 MCM_REG_INIT,
2952                 MCP2_REG_DBG_DWORD_ENABLE,
2953                 MISC_REG_PORT_MODE,
2954                 MISCS_REG_CLK_100G_MODE,
2955                 MSDM_REG_ENABLE_IN1,
2956                 MSEM_REG_ENABLE_IN,
2957                 NIG_REG_CM_HDR,
2958                 NCSI_REG_CONFIG,
2959                 PBF_REG_INIT,
2960                 PTU_REG_ATC_INIT_ARRAY,
2961                 PCM_REG_INIT,
2962                 PGLUE_B_REG_ADMIN_PER_PF_REGION,
2963                 PRM_REG_DISABLE_PRM,
2964                 PRS_REG_SOFT_RST,
2965                 PSDM_REG_ENABLE_IN1,
2966                 PSEM_REG_ENABLE_IN,
2967                 PSWRQ_REG_DBG_SELECT,
2968                 PSWRQ2_REG_CDUT_P_SIZE,
2969                 PSWHST_REG_DISCARD_INTERNAL_WRITES,
2970                 PSWHST2_REG_DBGSYN_ALMOST_FULL_THR,
2971                 PSWRD_REG_DBG_SELECT,
2972                 PSWRD2_REG_CONF11,
2973                 PSWWR_REG_USDM_FULL_TH,
2974                 PSWWR2_REG_CDU_FULL_TH2,
2975                 QM_REG_MAXPQSIZE_0,
2976                 RSS_REG_RSS_INIT_EN,
2977                 RDIF_REG_STOP_ON_ERROR,
2978                 SRC_REG_SOFT_RST,
2979                 TCFC_REG_ACTIVITY_COUNTER,
2980                 TCM_REG_INIT,
2981                 TM_REG_PXP_READ_DATA_FIFO_INIT,
2982                 TSDM_REG_ENABLE_IN1,
2983                 TSEM_REG_ENABLE_IN,
2984                 TDIF_REG_STOP_ON_ERROR,
2985                 UCM_REG_INIT,
2986                 UMAC_REG_IPG_HD_BKP_CNTL_BB_B0,
2987                 USDM_REG_ENABLE_IN1,
2988                 USEM_REG_ENABLE_IN,
2989                 XCM_REG_INIT,
2990                 XSDM_REG_ENABLE_IN1,
2991                 XSEM_REG_ENABLE_IN,
2992                 YCM_REG_INIT,
2993                 YSDM_REG_ENABLE_IN1,
2994                 YSEM_REG_ENABLE_IN,
2995                 XYLD_REG_SCBD_STRICT_PRIO,
2996                 TMLD_REG_SCBD_STRICT_PRIO,
2997                 MULD_REG_SCBD_STRICT_PRIO,
2998                 YULD_REG_SCBD_STRICT_PRIO,
2999         };
3000         u32 test_val[] = { 0x0, 0x1 };
3001         u32 val, save_val, i, j;
3002
3003         for (i = 0; i < OSAL_ARRAY_SIZE(test_val); i++) {
3004                 for (j = 0; j < OSAL_ARRAY_SIZE(reg_tbl); j++) {
3005                         save_val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
3006                         ecore_wr(p_hwfn, p_ptt, reg_tbl[j], test_val[i]);
3007                         val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
3008                         /* Restore the original register's value */
3009                         ecore_wr(p_hwfn, p_ptt, reg_tbl[j], save_val);
3010                         if (val != test_val[i]) {
3011                                 DP_INFO(p_hwfn->p_dev,
3012                                         "offset 0x%x: val 0x%x != 0x%x\n",
3013                                         reg_tbl[j], val, test_val[i]);
3014                                 return ECORE_AGAIN;
3015                         }
3016                 }
3017         }
3018         return ECORE_SUCCESS;
3019 }
3020
3021 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
3022                                                struct ecore_ptt *p_ptt,
3023                                                u32 hw_addr, void *p_qzone,
3024                                                osal_size_t qzone_size,
3025                                                u8 timeset)
3026 {
3027         struct coalescing_timeset *p_coalesce_timeset;
3028
3029         if (IS_VF(p_hwfn->p_dev)) {
3030                 DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
3031                 return ECORE_INVAL;
3032         }
3033
3034         if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
3035                 DP_NOTICE(p_hwfn, true,
3036                           "Coalescing configuration not enabled\n");
3037                 return ECORE_INVAL;
3038         }
3039
3040         OSAL_MEMSET(p_qzone, 0, qzone_size);
3041         p_coalesce_timeset = p_qzone;
3042         p_coalesce_timeset->timeset = timeset;
3043         p_coalesce_timeset->valid = 1;
3044         ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_qzone, qzone_size);
3045
3046         return ECORE_SUCCESS;
3047 }
3048
3049 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
3050                                             struct ecore_ptt *p_ptt,
3051                                             u8 coalesce, u8 qid)
3052 {
3053         struct ustorm_eth_queue_zone qzone;
3054         u16 fw_qid = 0;
3055         u32 address;
3056         u8 timeset;
3057         enum _ecore_status_t rc;
3058
3059         rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
3060         if (rc != ECORE_SUCCESS)
3061                 return rc;
3062
3063         address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
3064         /* Translate the coalescing time into a timeset, according to:
3065          * Timeout[Rx] = TimeSet[Rx] << (TimerRes[Rx] + 1)
3066          */
3067         timeset = coalesce >> (ECORE_CAU_DEF_RX_TIMER_RES + 1);
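        /* Worked example (illustrative, assuming a timer resolution of 0):
         * coalesce = 64us yields timeset = 64 >> 1 = 32, and the resulting
         * timeout is 32 << 1 = 64us, matching the requested value.
         */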
3068
3069         rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
3070                                 sizeof(struct ustorm_eth_queue_zone), timeset);
3071         if (rc != ECORE_SUCCESS)
3072                 goto out;
3073
3074         p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
3075 out:
3076         return rc;
3077 }
3078
3079 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
3080                                             struct ecore_ptt *p_ptt,
3081                                             u8 coalesce, u8 qid)
3082 {
3083         struct ystorm_eth_queue_zone qzone;
3084         u16 fw_qid = 0;
3085         u32 address;
3086         u8 timeset;
3087         enum _ecore_status_t rc;
3088
3089         rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
3090         if (rc != ECORE_SUCCESS)
3091                 return rc;
3092
3093         address = BAR0_MAP_REG_YSDM_RAM + YSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
3094         /* Translate the coalescing time into a timeset, according to:
3095          * Timeout[Tx] = TimeSet[Tx] << (TimerRes[Tx] + 1)
3096          */
3097         timeset = coalesce >> (ECORE_CAU_DEF_TX_TIMER_RES + 1);
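        /* Same translation as the Rx path; e.g. (illustrative) a timer
         * resolution of 1 and coalesce = 96us yield timeset = 96 >> 2 = 24.
         */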
3098
3099         rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
3100                                 sizeof(struct ystorm_eth_queue_zone), timeset);
3101         if (rc != ECORE_SUCCESS)
3102                 goto out;
3103
3104         p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
3105 out:
3106         return rc;
3107 }
3108
3109 /* Calculate the final WFQ value for each vport and configure it.
3110  * After this configuration each vport gets an approximate min rate of
3111  * vport_wfq * min_pf_rate / ECORE_WFQ_UNIT.
3112  */
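/* Worked example (illustrative, assuming ECORE_WFQ_UNIT == 100): with
 * min_pf_rate = 10000 Mbps and a vport whose min_speed = 500 Mbps,
 * vport_wfq = (500 * 100) / 10000 = 5, i.e. roughly 5% of the PF min rate.
 */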
3113 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
3114                                                struct ecore_ptt *p_ptt,
3115                                                u32 min_pf_rate)
3116 {
3117         struct init_qm_vport_params *vport_params;
3118         int i, num_vports;
3119
3120         vport_params = p_hwfn->qm_info.qm_vport_params;
3121         num_vports = p_hwfn->qm_info.num_vports;
3122
3123         for (i = 0; i < num_vports; i++) {
3124                 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
3125
3126                 vport_params[i].vport_wfq =
3127                     (wfq_speed * ECORE_WFQ_UNIT) / min_pf_rate;
3128                 ecore_init_vport_wfq(p_hwfn, p_ptt,
3129                                      vport_params[i].first_tx_pq_id,
3130                                      vport_params[i].vport_wfq);
3131         }
3132 }
3133
3134 static void
3135 ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
3136 {
3137         int i, num_vports;
3138         u32 min_speed;
3139
3140         num_vports = p_hwfn->qm_info.num_vports;
3141         min_speed = min_pf_rate / num_vports;
3142
3143         for (i = 0; i < num_vports; i++) {
3144                 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
3145                 p_hwfn->qm_info.wfq_data[i].default_min_speed = min_speed;
3146         }
3147 }
3148
3149 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
3150                                              struct ecore_ptt *p_ptt,
3151                                              u32 min_pf_rate)
3152 {
3153         struct init_qm_vport_params *vport_params;
3154         int i, num_vports;
3155
3156         vport_params = p_hwfn->qm_info.qm_vport_params;
3157         num_vports = p_hwfn->qm_info.num_vports;
3158
3159         ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
3160         for (i = 0; i < num_vports; i++) {
3161                 ecore_init_vport_wfq(p_hwfn, p_ptt,
3162                                      vport_params[i].first_tx_pq_id,
3163                                      vport_params[i].vport_wfq);
3164         }
3165 }
3166
3167 /* validate wfq for a given vport and required min rate */
3168 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
3169                                                  u16 vport_id, u32 req_rate,
3170                                                  u32 min_pf_rate)
3171 {
3172         u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
3173         int non_requested_count = 0, req_count = 0, i, num_vports;
3174
3175         num_vports = p_hwfn->qm_info.num_vports;
3176
3177         /* Check pre-set data for some of the vports */
3178         for (i = 0; i < num_vports; i++) {
3179                 u32 tmp_speed;
3180
3181                 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
3182                         req_count++;
3183                         tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
3184                         total_req_min_rate += tmp_speed;
3185                 }
3186         }
3187
3188         /* Include current vport data as well */
3189         req_count++;
3190         total_req_min_rate += req_rate;
3191         non_requested_count = p_hwfn->qm_info.num_vports - req_count;
3192
3193         /* validate possible error cases */
3194         if (req_rate > min_pf_rate) {
3195                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3196                            "Vport [%d] - Requested rate[%d Mbps] is greater"
3197                            " than configured PF min rate[%d Mbps]\n",
3198                            vport_id, req_rate, min_pf_rate);
3199                 return ECORE_INVAL;
3200         }
3201
3202         if (req_rate * ECORE_WFQ_UNIT / min_pf_rate < 1) {
3203                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3204                            "Vport [%d] - Requested rate[%d Mbps] is less than"
3205                            " one percent of configured PF min rate[%d Mbps]\n",
3206                            vport_id, req_rate, min_pf_rate);
3207                 return ECORE_INVAL;
3208         }
3209
3210         /* TBD - for number of vports greater than 100 */
3211         if (ECORE_WFQ_UNIT / p_hwfn->qm_info.num_vports < 1) {
3212                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3213                            "Number of vports is greater than 100\n");
3214                 return ECORE_INVAL;
3215         }
3216
3217         if (total_req_min_rate > min_pf_rate) {
3218                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3219                            "Total requested min rate for all vports [%d Mbps]"
3220                            " is greater than configured PF min rate [%d Mbps]\n",
3221                            total_req_min_rate, min_pf_rate);
3222                 return ECORE_INVAL;
3223         }
3224
3225         /* Bandwidth left for the non-requested vports */
3226         total_left_rate = min_pf_rate - total_req_min_rate;
        if (non_requested_count) {
3227                 left_rate_per_vp = total_left_rate / non_requested_count;
3228
3229                 /* Reject if non-requested vports would get < 1% of min bw */
3230                 if (left_rate_per_vp * ECORE_WFQ_UNIT / min_pf_rate < 1)
3231                         return ECORE_INVAL;
        }
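        /* Worked example (illustrative, assuming ECORE_WFQ_UNIT == 100):
         * with min_pf_rate = 10000 Mbps and requests totalling 9950 Mbps,
         * a single non-requested vport would be left with 50 Mbps;
         * 50 * 100 / 10000 == 0, i.e. under 1%, so the request is rejected.
         */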
3232
3233         /* The req_rate for the given vport now passes all checks;
3234          * assign the final wfq rates to all vports.
3235          */
3236         p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
3237         p_hwfn->qm_info.wfq_data[vport_id].configured = true;
3238
3239         for (i = 0; i < num_vports; i++) {
3240                 if (p_hwfn->qm_info.wfq_data[i].configured)
3241                         continue;
3242
3243                 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
3244         }
3245
3246         return ECORE_SUCCESS;
3247 }
3248
3249 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
3250                                        struct ecore_ptt *p_ptt,
3251                                        u16 vp_id, u32 rate)
3252 {
3253         struct ecore_mcp_link_state *p_link;
3254         int rc = ECORE_SUCCESS;
3255
3256         p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
3257
3258         if (!p_link->min_pf_rate) {
3259                 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
3260                 p_hwfn->qm_info.wfq_data[vp_id].configured = true;
3261                 return rc;
3262         }
3263
3264         rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
3265
3266         if (rc == ECORE_SUCCESS)
3267                 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
3268                                                    p_link->min_pf_rate);
3269         else
3270                 DP_NOTICE(p_hwfn, false,
3271                           "Validation failed while configuring min rate\n");
3272
3273         return rc;
3274 }
3275
3276 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
3277                                                    struct ecore_ptt *p_ptt,
3278                                                    u32 min_pf_rate)
3279 {
3280         int rc = ECORE_SUCCESS;
3281         bool use_wfq = false;
3282         u16 i, num_vports;
3283
3284         num_vports = p_hwfn->qm_info.num_vports;
3285
3286         /* Validate all pre-configured vports for wfq */
3287         for (i = 0; i < num_vports; i++) {
3288                 if (p_hwfn->qm_info.wfq_data[i].configured) {
3289                         u32 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
3290
3291                         use_wfq = true;
3292                         rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
3293                         if (rc == ECORE_INVAL) {
3294                                 DP_NOTICE(p_hwfn, false,
3295                                           "Validation failed while"
3296                                           " configuring min rate\n");
3297                                 break;
3298                         }
3299                 }
3300         }
3301
3302         if (rc == ECORE_SUCCESS && use_wfq)
3303                 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3304         else
3305                 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3306
3307         return rc;
3308 }
3309
3310 /* Main API for ecore clients to configure a vport's min rate.
3311  * vp_id - vport id in PF range [0 - (total_num_vports_per_pf - 1)]
3312  * rate - min rate in Mbps to be assigned to the given vport.
3313  */
3314 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
3315 {
3316         int i, rc = ECORE_INVAL;
3317
3318         /* TBD - add support for multiple hardware functions, i.e. 100G devices */
3319         if (p_dev->num_hwfns > 1) {
3320                 DP_NOTICE(p_dev, false,
3321                           "WFQ configuration is not supported for this dev\n");
3322                 return rc;
3323         }
3324
3325         for_each_hwfn(p_dev, i) {
3326                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3327                 struct ecore_ptt *p_ptt;
3328
3329                 p_ptt = ecore_ptt_acquire(p_hwfn);
3330                 if (!p_ptt)
3331                         return ECORE_TIMEOUT;
3332
3333                 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
3334
3335                 if (rc != ECORE_SUCCESS) {
3336                         ecore_ptt_release(p_hwfn, p_ptt);
3337                         return rc;
3338                 }
3339
3340                 ecore_ptt_release(p_hwfn, p_ptt);
3341         }
3342
3343         return rc;
3344 }
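/* Usage sketch (hypothetical caller, values illustrative): guarantee vport 2
 * a 2500 Mbps minimum on a single-hwfn device:
 *
 *      if (ecore_configure_vport_wfq(p_dev, 2, 2500) != ECORE_SUCCESS)
 *              DP_NOTICE(p_dev, false, "Failed to set vport min rate\n");
 */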
3345
3346 /* API to configure WFQ from mcp link change */
3347 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
3348                                            u32 min_pf_rate)
3349 {
3350         int i;
3351
3352         /* TBD - add support for multiple hardware functions, i.e. 100G devices */
3353         if (p_dev->num_hwfns > 1) {
3354                 DP_VERBOSE(p_dev, ECORE_MSG_LINK,
3355                            "WFQ configuration is not supported for this dev\n");
3356                 return;
3357         }
3358
3359         for_each_hwfn(p_dev, i) {
3360                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3361
3362                 __ecore_configure_vp_wfq_on_link_change(p_hwfn,
3363                                                         p_hwfn->p_dpc_ptt,
3364                                                         min_pf_rate);
3365         }
3366 }
3367
3368 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
3369                                        struct ecore_ptt *p_ptt,
3370                                        struct ecore_mcp_link_state *p_link,
3371                                        u8 max_bw)
3372 {
3373         int rc = ECORE_SUCCESS;
3374
3375         p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
3376
3377         if (!p_link->line_speed)
3378                 return rc;
3379
3380         p_link->speed = (p_link->line_speed * max_bw) / 100;
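        /* e.g. (illustrative): line_speed = 25000 Mbps and max_bw = 40
         * yield speed = (25000 * 40) / 100 = 10000 Mbps.
         */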
3381
3382         rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_link->speed);
3383
3384         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3385                    "Configured MAX bandwidth to be %d Mb/sec\n",
3386                    p_link->speed);
3387
3388         return rc;
3389 }
3390
3391 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
3392 int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
3393 {
3394         int i, rc = ECORE_INVAL;
3395
3396         if (max_bw < 1 || max_bw > 100) {
3397                 DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
3398                 return rc;
3399         }
3400
3401         for_each_hwfn(p_dev, i) {
3402                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3403                 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
3404                 struct ecore_mcp_link_state *p_link;
3405                 struct ecore_ptt *p_ptt;
3406
3407                 p_link = &p_lead->mcp_info->link_output;
3408
3409                 p_ptt = ecore_ptt_acquire(p_hwfn);
3410                 if (!p_ptt)
3411                         return ECORE_TIMEOUT;
3412
3413                 rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
3414                                                         p_link, max_bw);
3415                 if (rc != ECORE_SUCCESS) {
3416                         ecore_ptt_release(p_hwfn, p_ptt);
3417                         return rc;
3418                 }
3419
3420                 ecore_ptt_release(p_hwfn, p_ptt);
3421         }
3422
3423         return rc;
3424 }
3425
3426 int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
3427                                        struct ecore_ptt *p_ptt,
3428                                        struct ecore_mcp_link_state *p_link,
3429                                        u8 min_bw)
3430 {
3431         int rc = ECORE_SUCCESS;
3432
3433         p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
3434
3435         if (!p_link->line_speed)
3436                 return rc;
3437
3438         p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
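        /* e.g. (illustrative): line_speed = 25000 Mbps and min_bw = 20
         * yield min_pf_rate = (25000 * 20) / 100 = 5000 Mbps.
         */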
3439
3440         rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
3441
3442         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3443                    "Configured MIN bandwidth to be %d Mb/sec\n",
3444                    p_link->min_pf_rate);
3445
3446         return rc;
3447 }
3448
3449 /* Main API to configure PF min bandwidth where bw range is [1-100] */
3450 int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
3451 {
3452         int i, rc = ECORE_INVAL;
3453
3454         if (min_bw < 1 || min_bw > 100) {
3455                 DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
3456                 return rc;
3457         }
3458
3459         for_each_hwfn(p_dev, i) {
3460                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3461                 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
3462                 struct ecore_mcp_link_state *p_link;
3463                 struct ecore_ptt *p_ptt;
3464
3465                 p_link = &p_lead->mcp_info->link_output;
3466
3467                 p_ptt = ecore_ptt_acquire(p_hwfn);
3468                 if (!p_ptt)
3469                         return ECORE_TIMEOUT;
3470
3471                 rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
3472                                                         p_link, min_bw);
3473                 if (rc != ECORE_SUCCESS) {
3474                         ecore_ptt_release(p_hwfn, p_ptt);
3475                         return rc;
3476                 }
3477
3478                 if (p_link->min_pf_rate) {
3479                         u32 min_rate = p_link->min_pf_rate;
3480
3481                         rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
3482                                                                      p_ptt,
3483                                                                      min_rate);
3484                 }
3485
3486                 ecore_ptt_release(p_hwfn, p_ptt);
3487         }
3488
3489         return rc;
3490 }
3491
3492 void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
3493 {
3494         struct ecore_mcp_link_state *p_link;
3495
3496         p_link = &p_hwfn->mcp_info->link_output;
3497
3498         if (p_link->min_pf_rate)
3499                 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
3500                                                  p_link->min_pf_rate);
3501
3502         OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
3503                     sizeof(*p_hwfn->qm_info.wfq_data) *
3504                     p_hwfn->qm_info.num_vports);
3505 }
3506
3507 int ecore_device_num_engines(struct ecore_dev *p_dev)
3508 {
3509         return ECORE_IS_BB(p_dev) ? 2 : 1;
3510 }
3511
3512 int ecore_device_num_ports(struct ecore_dev *p_dev)
3513 {
3514         /* In CMT mode there is always a single port */
3515         if (p_dev->num_hwfns > 1)
3516                 return 1;
3517
3518         return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
3519 }