net/qede/base: update FW to 8.40.25.0
drivers/net/qede/base/ecore_spq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include "bcm_osal.h"
8 #include "reg_addr.h"
9 #include "ecore_gtt_reg_addr.h"
10 #include "ecore_hsi_common.h"
11 #include "ecore.h"
12 #include "ecore_sp_api.h"
13 #include "ecore_spq.h"
14 #include "ecore_iro.h"
15 #include "ecore_init_fw_funcs.h"
16 #include "ecore_cxt.h"
17 #include "ecore_int.h"
18 #include "ecore_dev_api.h"
19 #include "ecore_mcp.h"
20 #include "ecore_hw.h"
21 #include "ecore_sriov.h"
22
23 /***************************************************************************
24  * Structures & Definitions
25  ***************************************************************************/
26
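/* Number of SPQ ring elements kept in reserve for a high priority ramrod;
 * used as the 'keep_reserve' argument of ecore_spq_post_list().
 */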
27 #define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
28
29 #define SPQ_BLOCK_DELAY_MAX_ITER        (10)
30 #define SPQ_BLOCK_DELAY_US              (10)
31 #define SPQ_BLOCK_SLEEP_MAX_ITER        (200)
32 #define SPQ_BLOCK_SLEEP_MS              (5)
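
/* With these defaults, __ecore_spq_block() first busy-waits for up to
 * 10 x 10us without sleeping, and then polls with sleeps for up to
 * block_sleep_max_iter x 5ms - i.e. 200 x 5ms = ~1 second, unless a
 * different timeout was set via ecore_set_spq_block_timeout().
 */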
33
34 /***************************************************************************
35  * Blocking Implementation (BLOCK/EBLOCK mode)
36  ***************************************************************************/
37 static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
38                                   union event_ring_data OSAL_UNUSED * data,
39                                   u8 fw_return_code)
40 {
41         struct ecore_spq_comp_done *comp_done;
42
43         comp_done = (struct ecore_spq_comp_done *)cookie;
44
45         comp_done->done = 0x1;
46         comp_done->fw_return_code = fw_return_code;
47
48         /* make update visible to waiting thread */
49         OSAL_SMP_WMB(p_hwfn->p_dev);
50 }
51
52 static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
53                                               struct ecore_spq_entry *p_ent,
54                                               u8 *p_fw_ret,
55                                               bool sleep_between_iter)
56 {
57         struct ecore_spq_comp_done *comp_done;
58         u32 iter_cnt;
59
60         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
61         iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
62                                       : SPQ_BLOCK_DELAY_MAX_ITER;
63 #ifndef ASIC_ONLY
64         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
65                 iter_cnt *= 5;
66 #endif
67
68         while (iter_cnt--) {
69                 OSAL_POLL_MODE_DPC(p_hwfn);
70                 OSAL_SMP_RMB(p_hwfn->p_dev);
71                 if (comp_done->done == 1) {
72                         if (p_fw_ret)
73                                 *p_fw_ret = comp_done->fw_return_code;
74                         return ECORE_SUCCESS;
75                 }
76
77                 if (sleep_between_iter)
78                         OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
79                 else
80                         OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
81         }
82
83         return ECORE_TIMEOUT;
84 }
85
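/* Blocking wait for a ramrod completion: a quick busy-wait poll first
 * (unless skip_quick_poll), then polling with sleeps, then an MCP drain
 * request followed by one more polling round.  If the ramrod still has
 * not completed, the failure is reported via
 * ecore_hw_err_notify(ECORE_HW_ERR_RAMROD_FAIL).
 */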
86 static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
87                                             struct ecore_spq_entry *p_ent,
88                                             u8 *p_fw_ret, bool skip_quick_poll)
89 {
90         struct ecore_spq_comp_done *comp_done;
91         struct ecore_ptt *p_ptt;
92         enum _ecore_status_t rc;
93
94         /* A relatively short polling period w/o sleeping, to allow the FW to
95          * complete the ramrod and thus possibly to avoid the following sleeps.
96          */
97         if (!skip_quick_poll) {
98                 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
99                 if (rc == ECORE_SUCCESS)
100                         return ECORE_SUCCESS;
101         }
102
103         /* Move to polling with a sleeping period between iterations */
104         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
105         if (rc == ECORE_SUCCESS)
106                 return ECORE_SUCCESS;
107
108         p_ptt = ecore_ptt_acquire(p_hwfn);
109         if (!p_ptt)
110                 return ECORE_AGAIN;
111
112         DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
113         rc = ecore_mcp_drain(p_hwfn, p_ptt);
114         ecore_ptt_release(p_hwfn, p_ptt);
115         if (rc != ECORE_SUCCESS) {
116                 DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
117                 goto err;
118         }
119
120         /* Retry after drain */
121         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
122         if (rc == ECORE_SUCCESS)
123                 return ECORE_SUCCESS;
124
125         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
126         if (comp_done->done == 1) {
127                 if (p_fw_ret)
128                         *p_fw_ret = comp_done->fw_return_code;
129                 return ECORE_SUCCESS;
130         }
131 err:
132         DP_NOTICE(p_hwfn, true,
133                   "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
134                   OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
135                   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
136                   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
137
138         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
139
140         return ECORE_BUSY;
141 }
142
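/* Set the number of sleeping-poll iterations ecore_spq_block() may use.
 * For example, a 1000ms timeout yields 1000 / SPQ_BLOCK_SLEEP_MS = 200
 * iterations, which matches the SPQ_BLOCK_SLEEP_MAX_ITER default used
 * when spq_timeout_ms is 0.
 */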
143 void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
144                                  u32 spq_timeout_ms)
145 {
146         p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
147                 spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
148                 SPQ_BLOCK_SLEEP_MAX_ITER;
149 }
150
151 /***************************************************************************
152  * SPQ entries inner API
153  ***************************************************************************/
154 static enum _ecore_status_t
155 ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
156 {
157         p_ent->flags = 0;
158
159         switch (p_ent->comp_mode) {
160         case ECORE_SPQ_MODE_EBLOCK:
161         case ECORE_SPQ_MODE_BLOCK:
162                 p_ent->comp_cb.function = ecore_spq_blocking_cb;
163                 break;
164         case ECORE_SPQ_MODE_CB:
165                 break;
166         default:
167                 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
168                           p_ent->comp_mode);
169                 return ECORE_INVAL;
170         }
171
172         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
173                    "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
174                    " Data pointer: [%08x:%08x] Completion Mode: %s\n",
175                    p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
176                    p_ent->elem.hdr.protocol_id,
177                    p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
178                    D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
179                            ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
180                            "MODE_CB"));
181
182         return ECORE_SUCCESS;
183 }
184
185 /***************************************************************************
186  * HSI access
187  ***************************************************************************/
188
189 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK                   0x1
190 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT                  0
191 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK               0x1
192 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT              7
193 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK               0x1
194 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT              4
195 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK        0x1
196 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT       6
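
/* The masks/shifts above mirror the HSI layout of the xstorm aggregative
 * context flags and are used with SET_FIELD() when initializing
 * flags1/flags9/flags10 in ecore_spq_hw_initialize() below.
 */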
197
198 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
199                                     struct ecore_spq *p_spq)
200 {
201         __le32 *p_spq_base_lo, *p_spq_base_hi;
202         struct regpair *p_consolid_base_addr;
203         u8 *p_flags1, *p_flags9, *p_flags10;
204         struct core_conn_context *p_cxt;
205         struct ecore_cxt_info cxt_info;
206         u32 core_conn_context_size;
207         __le16 *p_physical_q0;
208         u16 physical_q;
209         enum _ecore_status_t rc;
210
211         cxt_info.iid = p_spq->cid;
212
213         rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
214
215         if (rc != ECORE_SUCCESS) {
216                 DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
217                           p_spq->cid);
218                 return;
219         }
220
221         p_cxt = cxt_info.p_cxt;
222         core_conn_context_size = sizeof(*p_cxt);
223         p_flags1 = &p_cxt->xstorm_ag_context.flags1;
224         p_flags9 = &p_cxt->xstorm_ag_context.flags9;
225         p_flags10 = &p_cxt->xstorm_ag_context.flags10;
226         p_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0;
227         p_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo;
228         p_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi;
229         p_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr;
230
231         /* @@@TBD we zero the context until we have ilt_reset implemented. */
232         OSAL_MEM_ZERO(p_cxt, core_conn_context_size);
233
234         SET_FIELD(*p_flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
235         SET_FIELD(*p_flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
236         SET_FIELD(*p_flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
237
238         /* CDU validation - FIXME currently disabled */
239
240         /* QM physical queue */
241         physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
242         *p_physical_q0 = OSAL_CPU_TO_LE16(physical_q);
243
244         *p_spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr);
245         *p_spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr);
246
247         DMA_REGPAIR_LE(*p_consolid_base_addr,
248                        p_hwfn->p_consq->chain.p_phys_addr);
249 }
250
251 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
252                                               struct ecore_spq *p_spq,
253                                               struct ecore_spq_entry *p_ent)
254 {
255         struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
256         struct core_db_data *p_db_data = &p_spq->db_data;
257         u16 echo = ecore_chain_get_prod_idx(p_chain);
258         struct slow_path_element *elem;
259
260         p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
261         elem = ecore_chain_produce(p_chain);
262         if (!elem) {
263                 DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
264                 return ECORE_INVAL;
265         }
266
267         *elem = p_ent->elem;    /* Struct assignment */
268
269         p_db_data->spq_prod =
270                 OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
271
272         /* Make sure the SPQE is updated before the doorbell */
273         OSAL_WMB(p_hwfn->p_dev);
274
275         DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
276
277         /* Make sure the doorbell is rung */
278         OSAL_WMB(p_hwfn->p_dev);
279
280         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
281                    "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
282                    " agg_params: %02x, prod: %04x\n",
283                    p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
284                    p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
285
286         return ECORE_SUCCESS;
287 }
288
289 /***************************************************************************
290  * Asynchronous events
291  ***************************************************************************/
292
293 static enum _ecore_status_t
294 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
295                              struct event_ring_entry *p_eqe)
296 {
297         ecore_spq_async_comp_cb cb;
298         enum _ecore_status_t rc;
299
300         if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
301                 DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
302                 return ECORE_INVAL;
303         }
304
305         cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
306         if (!cb) {
307                 DP_NOTICE(p_hwfn,
308                           true, "Unknown Async completion for protocol: %d\n",
309                           p_eqe->protocol_id);
310                 return ECORE_INVAL;
311         }
312
313         rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
314                 &p_eqe->data, p_eqe->fw_return_code);
315         if (rc != ECORE_SUCCESS)
316                 DP_NOTICE(p_hwfn, true,
317                           "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]",
318                           rc, p_eqe->opcode, p_eqe->echo,
319                           p_eqe->fw_return_code);
320
321         return rc;
322 }
323
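/* Other ecore modules register a per-protocol callback here; EQ entries
 * flagged as asynchronous (EVENT_RING_ENTRY_ASYNC) are dispatched to it
 * by ecore_async_event_completion() above.
 */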
324 enum _ecore_status_t
325 ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
326                             enum protocol_type protocol_id,
327                             ecore_spq_async_comp_cb cb)
328 {
329         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
330                 return ECORE_INVAL;
331
332         p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
333         return ECORE_SUCCESS;
334 }
335
336 void
337 ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
338                               enum protocol_type protocol_id)
339 {
340         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
341                 return;
342
343         p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
344 }
345
346 /***************************************************************************
347  * EQ API
348  ***************************************************************************/
349 void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
350 {
351         u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
352             USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
353
354         REG_WR16(p_hwfn, addr, prod);
355
356         /* keep prod updates ordered */
357         OSAL_MMIOWB(p_hwfn->p_dev);
358 }
359
360 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
361                                          void *cookie)
362 {
363         struct ecore_eq *p_eq = cookie;
364         struct ecore_chain *p_chain = &p_eq->chain;
365         u16 fw_cons_idx             = 0;
366         enum _ecore_status_t rc = ECORE_SUCCESS;
367
368         if (!p_hwfn->p_spq) {
369                 DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
370                 return ECORE_INVAL;
371         }
372
373         /* take a snapshot of the FW consumer */
374         fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
375
376         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
377
378         /* Need to guarantee the fw_cons index we use points to a usable
379          * element of the chain, so that the chain macros behave correctly.
380          */
381         if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
382             ecore_chain_get_usable_per_page(p_chain)) {
383                 fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
384         }
385
386         /* Complete current segment of eq entries */
387         while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
388                 struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
389                 if (!p_eqe) {
390                         DP_ERR(p_hwfn,
391                                "Unexpected NULL chain consumer entry\n");
392                         break;
393                 }
394
395                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
396                            "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
397                            p_eqe->opcode,            /* Event Opcode */
398                            p_eqe->protocol_id,  /* Event Protocol ID */
399                            p_eqe->reserved0,    /* Reserved */
400                            /* Echo value from ramrod data on the host */
401                            OSAL_LE16_TO_CPU(p_eqe->echo),
402                            p_eqe->fw_return_code,    /* FW return code for SP
403                                                       * ramrods
404                                                       */
405                            p_eqe->flags);
406
407                 if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC))
408                         ecore_async_event_completion(p_hwfn, p_eqe);
409                 else
410                         ecore_spq_completion(p_hwfn,
411                                              p_eqe->echo,
412                                              p_eqe->fw_return_code,
413                                              &p_eqe->data);
414
415                 ecore_chain_recycle_consumed(p_chain);
416         }
417
418         ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
419
420         return rc;
421 }
422
423 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
424 {
425         struct ecore_eq *p_eq;
426
427         /* Allocate EQ struct */
428         p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
429         if (!p_eq) {
430                 DP_NOTICE(p_hwfn, false,
431                           "Failed to allocate `struct ecore_eq'\n");
432                 return ECORE_NOMEM;
433         }
434
435         /* Allocate and initialize EQ chain */
436         if (ecore_chain_alloc(p_hwfn->p_dev,
437                               ECORE_CHAIN_USE_TO_PRODUCE,
438                               ECORE_CHAIN_MODE_PBL,
439                               ECORE_CHAIN_CNT_TYPE_U16,
440                               num_elem,
441                               sizeof(union event_ring_element),
442                               &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
443                 DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
444                 goto eq_allocate_fail;
445         }
446
447         /* register EQ completion on the SP SB */
448         ecore_int_register_cb(p_hwfn, ecore_eq_completion,
449                               p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
450
451         p_hwfn->p_eq = p_eq;
452         return ECORE_SUCCESS;
453
454 eq_allocate_fail:
455         OSAL_FREE(p_hwfn->p_dev, p_eq);
456         return ECORE_NOMEM;
457 }
458
459 void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
460 {
461         ecore_chain_reset(&p_hwfn->p_eq->chain);
462 }
463
464 void ecore_eq_free(struct ecore_hwfn *p_hwfn)
465 {
466         if (!p_hwfn->p_eq)
467                 return;
468
469         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
470
471         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
472         p_hwfn->p_eq = OSAL_NULL;
473 }
474
475 /***************************************************************************
476  * CQE API - manipulate EQ functionality
477  ***************************************************************************/
478 static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
479                                                  struct eth_slow_path_rx_cqe
480                                                  *cqe,
481                                                  enum protocol_type protocol)
482 {
483         if (IS_VF(p_hwfn->p_dev))
484                 return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
485
486         /* @@@tmp - it's possible we'll eventually want to handle some
487          * actual commands that can arrive here, but for now this is only
488          * used to complete the ramrod using the echo value on the cqe
489          */
490         return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
491 }
492
493 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
494                                               struct eth_slow_path_rx_cqe *cqe)
495 {
496         enum _ecore_status_t rc;
497
498         rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
499         if (rc) {
500                 DP_NOTICE(p_hwfn, true,
501                           "Failed to handle RXQ CQE [cmd 0x%02x]\n",
502                           cqe->ramrod_cmd_id);
503         }
504
505         return rc;
506 }
507
508 /***************************************************************************
509  * Slow hwfn Queue (spq)
510  ***************************************************************************/
511 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
512 {
513         struct ecore_spq *p_spq = p_hwfn->p_spq;
514         struct ecore_spq_entry *p_virt = OSAL_NULL;
515         struct core_db_data *p_db_data;
516         void OSAL_IOMEM *db_addr;
517         dma_addr_t p_phys = 0;
518         u32 i, capacity;
519         enum _ecore_status_t rc;
520
521         OSAL_LIST_INIT(&p_spq->pending);
522         OSAL_LIST_INIT(&p_spq->completion_pending);
523         OSAL_LIST_INIT(&p_spq->free_pool);
524         OSAL_LIST_INIT(&p_spq->unlimited_pending);
525         OSAL_SPIN_LOCK_INIT(&p_spq->lock);
526
527         /* SPQ empty pool */
528         p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
529         p_virt = p_spq->p_virt;
530
531         capacity = ecore_chain_get_capacity(&p_spq->chain);
532         for (i = 0; i < capacity; i++) {
533                 DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
534
535                 OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
536
537                 p_virt++;
538                 p_phys += sizeof(struct ecore_spq_entry);
539         }
540
541         /* Statistics */
542         p_spq->normal_count = 0;
543         p_spq->comp_count = 0;
544         p_spq->comp_sent_count = 0;
545         p_spq->unlimited_pending_count = 0;
546
547         OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
548                       SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
549         p_spq->comp_bitmap_idx = 0;
550
551         /* SPQ cid, cannot fail */
552         ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
553         ecore_spq_hw_initialize(p_hwfn, p_spq);
554
555         /* reset the chain itself */
556         ecore_chain_reset(&p_spq->chain);
557
558         /* Initialize the address/data of the SPQ doorbell */
559         p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
560         p_db_data = &p_spq->db_data;
561         OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
562         SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
563         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
564         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
565                   DQ_XCM_CORE_SPQ_PROD_CMD);
566         p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
567
568         /* Register the SPQ doorbell with the doorbell recovery mechanism */
569         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
570         rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
571                                    DB_REC_WIDTH_32B, DB_REC_KERNEL);
572         if (rc != ECORE_SUCCESS)
573                 DP_INFO(p_hwfn,
574                         "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
575 }
576
577 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
578 {
579         struct ecore_spq_entry *p_virt = OSAL_NULL;
580         struct ecore_spq *p_spq = OSAL_NULL;
581         dma_addr_t p_phys = 0;
582         u32 capacity;
583
584         /* SPQ struct */
585         p_spq =
586             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
587         if (!p_spq) {
588                 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
589                 return ECORE_NOMEM;
590         }
591
592         /* SPQ ring  */
593         if (ecore_chain_alloc(p_hwfn->p_dev,
594                               ECORE_CHAIN_USE_TO_PRODUCE,
595                               ECORE_CHAIN_MODE_SINGLE,
596                               ECORE_CHAIN_CNT_TYPE_U16,
597                               0, /* N/A when the mode is SINGLE */
598                               sizeof(struct slow_path_element),
599                               &p_spq->chain, OSAL_NULL)) {
600                 DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
601                 goto spq_allocate_fail;
602         }
603
604         /* allocate and fill the SPQ elements (incl. ramrod data list) */
605         capacity = ecore_chain_get_capacity(&p_spq->chain);
606         p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
607                                          capacity *
608                                          sizeof(struct ecore_spq_entry));
609         if (!p_virt)
610                 goto spq_allocate_fail;
611
612         p_spq->p_virt = p_virt;
613         p_spq->p_phys = p_phys;
614
615 #ifdef CONFIG_ECORE_LOCK_ALLOC
616         if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
617                 goto spq_allocate_fail;
618 #endif
619
620         p_hwfn->p_spq = p_spq;
621         return ECORE_SUCCESS;
622
623 spq_allocate_fail:
624         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
625         OSAL_FREE(p_hwfn->p_dev, p_spq);
626         return ECORE_NOMEM;
627 }
628
629 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
630 {
631         struct ecore_spq *p_spq = p_hwfn->p_spq;
632         void OSAL_IOMEM *db_addr;
633         u32 capacity;
634
635         if (!p_spq)
636                 return;
637
638         /* Delete the SPQ doorbell from the doorbell recovery mechanism */
639         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
640         ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
641
642         if (p_spq->p_virt) {
643                 capacity = ecore_chain_get_capacity(&p_spq->chain);
644                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
645                                        p_spq->p_virt,
646                                        p_spq->p_phys,
647                                        capacity *
648                                        sizeof(struct ecore_spq_entry));
649         }
650
651         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
652 #ifdef CONFIG_ECORE_LOCK_ALLOC
653         OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
654 #endif
655
656         OSAL_FREE(p_hwfn->p_dev, p_spq);
657 }
658
659 enum _ecore_status_t
660 ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
661 {
662         struct ecore_spq *p_spq = p_hwfn->p_spq;
663         struct ecore_spq_entry *p_ent = OSAL_NULL;
664         enum _ecore_status_t rc = ECORE_SUCCESS;
665
666         OSAL_SPIN_LOCK(&p_spq->lock);
667
668         if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
669                 p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
670                 if (!p_ent) {
671                         DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
672                         rc = ECORE_NOMEM;
673                         goto out_unlock;
674                 }
675                 p_ent->queue = &p_spq->unlimited_pending;
676         } else {
677                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
678                                               struct ecore_spq_entry, list);
679                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
680                 p_ent->queue = &p_spq->pending;
681         }
682
683         *pp_ent = p_ent;
684
685 out_unlock:
686         OSAL_SPIN_UNLOCK(&p_spq->lock);
687         return rc;
688 }
689
690 /* Locked variant; Should be called while the SPQ lock is taken */
691 static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
692                                      struct ecore_spq_entry *p_ent)
693 {
694         OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
695 }
696
697 void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
698                             struct ecore_spq_entry *p_ent)
699 {
700         OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
701         __ecore_spq_return_entry(p_hwfn, p_ent);
702         OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
703 }
704
705 /**
706  * @brief ecore_spq_add_entry - adds a new entry to the pending
707  *        list. Should be called while the SPQ lock is held.
708  *
709  * Adds an entry to the pending list if there is room (an empty
710  * element is available in the free_pool), or else places the
711  * entry in the unlimited_pending pool.
712  *
713  * @param p_hwfn
714  * @param p_ent
715  * @param priority
716  *
717  * @return enum _ecore_status_t
718  */
719 static enum _ecore_status_t
720 ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
721                     struct ecore_spq_entry *p_ent, enum spq_priority priority)
722 {
723         struct ecore_spq *p_spq = p_hwfn->p_spq;
724
725         if (p_ent->queue == &p_spq->unlimited_pending) {
726                 if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
727                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
728                                             &p_spq->unlimited_pending);
729                         p_spq->unlimited_pending_count++;
730
731                         return ECORE_SUCCESS;
732
733                 } else {
734                         struct ecore_spq_entry *p_en2;
735
736                         p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
737                                                      struct ecore_spq_entry,
738                                                      list);
739                         OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
740
741                         /* Copy the ring element physical pointer to the new
742                          * entry, since we are about to override the entire ring
743                          * entry and don't want to lose the pointer.
744                          */
745                         p_ent->elem.data_ptr = p_en2->elem.data_ptr;
746
747                         *p_en2 = *p_ent;
748
749                         /* EBLOCK responsible to free the allocated p_ent */
750                         if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
751                                 OSAL_FREE(p_hwfn->p_dev, p_ent);
752
753                         p_ent = p_en2;
754                 }
755         }
756
757         /* entry is to be placed in 'pending' queue */
758         switch (priority) {
759         case ECORE_SPQ_PRIORITY_NORMAL:
760                 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
761                 p_spq->normal_count++;
762                 break;
763         case ECORE_SPQ_PRIORITY_HIGH:
764                 OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
765                 p_spq->high_count++;
766                 break;
767         default:
768                 return ECORE_INVAL;
769         }
770
771         return ECORE_SUCCESS;
772 }
773
774 /***************************************************************************
775  * Accessor
776  ***************************************************************************/
777
778 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
779 {
780         if (!p_hwfn->p_spq)
781                 return 0xffffffff;      /* illegal */
782         return p_hwfn->p_spq->cid;
783 }
784
785 /***************************************************************************
786  * Posting new Ramrods
787  ***************************************************************************/
788
789 static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
790                                                 osal_list_t *head,
791                                                 u32 keep_reserve)
792 {
793         struct ecore_spq *p_spq = p_hwfn->p_spq;
794         enum _ecore_status_t rc;
795
796         /* TODO - implementation might be wasteful; will always keep room
797          * for an additional high priority ramrod (even if one is already
798          * pending in the FW)
799          */
800         while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
801                !OSAL_LIST_IS_EMPTY(head)) {
802                 struct ecore_spq_entry *p_ent =
803                     OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
804                 if (p_ent != OSAL_NULL) {
805 #if defined(_NTDDK_)
806 #pragma warning(suppress : 6011 28182)
807 #endif
808                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
809                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
810                                             &p_spq->completion_pending);
811                         p_spq->comp_sent_count++;
812
813                         rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
814                         if (rc) {
815                                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
816                                                     &p_spq->completion_pending);
817                                 __ecore_spq_return_entry(p_hwfn, p_ent);
818                                 return rc;
819                         }
820                 }
821         }
822
823         return ECORE_SUCCESS;
824 }
825
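/* Move as many unlimited_pending entries as the free pool allows onto the
 * pending list, then post the pending list to the FW while keeping
 * SPQ_HIGH_PRI_RESERVE_DEFAULT ring elements free for a high priority
 * ramrod.
 */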
826 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
827 {
828         struct ecore_spq *p_spq = p_hwfn->p_spq;
829         struct ecore_spq_entry *p_ent = OSAL_NULL;
830
831         while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
832                 if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
833                         break;
834
835                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
836                                               struct ecore_spq_entry, list);
837                 if (!p_ent)
838                         return ECORE_INVAL;
839
840 #if defined(_NTDDK_)
841 #pragma warning(suppress : 6011)
842 #endif
843                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
844
845                 ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
846         }
847
848         return ecore_spq_post_list(p_hwfn,
849                                  &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
850 }
851
852 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
853                                     struct ecore_spq_entry *p_ent,
854                                     u8 *fw_return_code)
855 {
856         enum _ecore_status_t rc = ECORE_SUCCESS;
857         struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
858         bool b_ret_ent = true;
859
860         if (!p_hwfn)
861                 return ECORE_INVAL;
862
863         if (!p_ent) {
864                 DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
865                 return ECORE_INVAL;
866         }
867
868         if (p_hwfn->p_dev->recov_in_prog) {
869                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
870                            "Recovery is in progress -> skip spq post"
871                            " [cmd %02x protocol %02x]\n",
872                            p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
873                 /* Return success to let the flows complete successfully
874                  * w/o any error handling.
875                  */
876                 return ECORE_SUCCESS;
877         }
878
879         OSAL_SPIN_LOCK(&p_spq->lock);
880
881         /* Complete the entry */
882         rc = ecore_spq_fill_entry(p_hwfn, p_ent);
883
884         /* Check return value after LOCK is taken for cleaner error flow */
885         if (rc)
886                 goto spq_post_fail;
887
888         /* Add the request to the pending queue */
889         rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
890         if (rc)
891                 goto spq_post_fail;
892
893         rc = ecore_spq_pend_post(p_hwfn);
894         if (rc) {
895                 /* Since it's possible that pending failed for a different
896                  * entry [although unlikely], the failed entry was already
897                  * dealt with; No need to return it here.
898                  */
899                 b_ret_ent = false;
900                 goto spq_post_fail;
901         }
902
903         OSAL_SPIN_UNLOCK(&p_spq->lock);
904
905         if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
906                 /* For entries in ECORE BLOCK mode, the completion code cannot
907                  * perform the necessary cleanup - if it did, we couldn't
908                  * access p_ent here to see whether it's successful or not.
909                  * Thus, after gaining the answer perform the cleanup here.
910                  */
911                 rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
912                                      p_ent->queue == &p_spq->unlimited_pending);
913
914                 if (p_ent->queue == &p_spq->unlimited_pending) {
915                         /* This is an allocated p_ent which does not need to
916                          * return to pool.
917                          */
918                         OSAL_FREE(p_hwfn->p_dev, p_ent);
919
920                         /* TBD: handle error flow and remove p_ent from
921                          * completion pending
922                          */
923                         return rc;
924                 }
925
926                 if (rc)
927                         goto spq_post_fail2;
928
929                 /* return to pool */
930                 ecore_spq_return_entry(p_hwfn, p_ent);
931         }
932         return rc;
933
934 spq_post_fail2:
935         OSAL_SPIN_LOCK(&p_spq->lock);
936         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
937         ecore_chain_return_produced(&p_spq->chain);
938
939 spq_post_fail:
940         /* return to the free pool */
941         if (b_ret_ent)
942                 __ecore_spq_return_entry(p_hwfn, p_ent);
943         OSAL_SPIN_UNLOCK(&p_spq->lock);
944
945         return rc;
946 }
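
/* A typical caller flow, as a rough sketch (ramrod-specific setup and the
 * higher-level helpers that normally wrap these calls are omitted):
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
 *	... fill p_ent->elem.hdr and the ramrod data ...
 *	rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 */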
947
948 enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
949                                           __le16 echo,
950                                           u8 fw_return_code,
951                                           union event_ring_data *p_data)
952 {
953         struct ecore_spq *p_spq;
954         struct ecore_spq_entry *p_ent = OSAL_NULL;
955         struct ecore_spq_entry *tmp;
956         struct ecore_spq_entry *found = OSAL_NULL;
957         enum _ecore_status_t rc;
958
959         p_spq = p_hwfn->p_spq;
960         if (!p_spq) {
961                 DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
962                 return ECORE_INVAL;
963         }
964
965         OSAL_SPIN_LOCK(&p_spq->lock);
966         OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
967                                       tmp,
968                                       &p_spq->completion_pending,
969                                       list, struct ecore_spq_entry) {
970                 if (p_ent->elem.hdr.echo == echo) {
971                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
972                                                &p_spq->completion_pending);
973
974                         /* Avoid overriding of SPQ entries when getting
975                          * out-of-order completions, by marking the completions
976                          * in a bitmap and increasing the chain consumer only
977                          * for the first successive completed entries.
978                          */
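                        /* e.g. if the completion for echo N+1 arrives before
                         * the one for echo N, only its bitmap bit is set here;
                         * the chain consumer is advanced for both entries only
                         * once the completion for echo N arrives.
                         */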
979                         SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
980                         while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
981                                                       p_spq->comp_bitmap_idx)) {
982                                 SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
983                                                         p_spq->comp_bitmap_idx);
984                                 p_spq->comp_bitmap_idx++;
985                                 ecore_chain_return_produced(&p_spq->chain);
986                         }
987
988                         p_spq->comp_count++;
989                         found = p_ent;
990                         break;
991                 }
992
993                 /* This is debug and should be relatively uncommon - depends
994                  * on scenarios which have multiple ramrods sent per PF.
995                  */
996                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
997                            "Got completion for echo %04x - doesn't match"
998                            " echo %04x in completion pending list\n",
999                            OSAL_LE16_TO_CPU(echo),
1000                            OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
1001         }
1002
1003         /* Release lock before callback, as callback may post
1004          * an additional ramrod.
1005          */
1006         OSAL_SPIN_UNLOCK(&p_spq->lock);
1007
1008         if (!found) {
1009                 DP_NOTICE(p_hwfn, true,
1010                           "Failed to find the entry that this"
1011                           " EQE [echo %04x] completes\n",
1012                           OSAL_LE16_TO_CPU(echo));
1013                 return ECORE_EXISTS;
1014         }
1015
1016         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1017                    "Complete EQE [echo %04x]: func %p cookie %p\n",
1018                    OSAL_LE16_TO_CPU(echo),
1019                    p_ent->comp_cb.function, p_ent->comp_cb.cookie);
1020         if (found->comp_cb.function)
1021                 found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
1022                                         fw_return_code);
1023         else
1024                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1025                            "Got a completion without a callback function\n");
1026
1027         if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
1028             (found->queue == &p_spq->unlimited_pending))
1029                 /* EBLOCK  is responsible for returning its own entry into the
1030                  * free list, unless it originally added the entry into the
1031                  * unlimited pending list.
1032                  */
1033                 ecore_spq_return_entry(p_hwfn, found);
1034
1035         /* Attempt to post pending requests */
1036         OSAL_SPIN_LOCK(&p_spq->lock);
1037         rc = ecore_spq_pend_post(p_hwfn);
1038         OSAL_SPIN_UNLOCK(&p_spq->lock);
1039
1040         return rc;
1041 }
1042
1043 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1044 {
1045         struct ecore_consq *p_consq;
1046
1047         /* Allocate ConsQ struct */
1048         p_consq =
1049             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1050         if (!p_consq) {
1051                 DP_NOTICE(p_hwfn, false,
1052                           "Failed to allocate `struct ecore_consq'\n");
1053                 return ECORE_NOMEM;
1054         }
1055
1056         /* Allocate and initialize the ConsQ chain */
1057         if (ecore_chain_alloc(p_hwfn->p_dev,
1058                               ECORE_CHAIN_USE_TO_PRODUCE,
1059                               ECORE_CHAIN_MODE_PBL,
1060                               ECORE_CHAIN_CNT_TYPE_U16,
1061                               ECORE_CHAIN_PAGE_SIZE / 0x80,
1062                               0x80,
1063                               &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1064                 DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
1065                 goto consq_allocate_fail;
1066         }
1067
1068         p_hwfn->p_consq = p_consq;
1069         return ECORE_SUCCESS;
1070
1071 consq_allocate_fail:
1072         OSAL_FREE(p_hwfn->p_dev, p_consq);
1073         return ECORE_NOMEM;
1074 }
1075
1076 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1077 {
1078         ecore_chain_reset(&p_hwfn->p_consq->chain);
1079 }
1080
1081 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1082 {
1083         if (!p_hwfn->p_consq)
1084                 return;
1085
1086         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1087         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
1088 }