1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include "bcm_osal.h"
8 #include "reg_addr.h"
9 #include "ecore_gtt_reg_addr.h"
10 #include "ecore_hsi_common.h"
11 #include "ecore.h"
12 #include "ecore_sp_api.h"
13 #include "ecore_spq.h"
14 #include "ecore_iro.h"
15 #include "ecore_init_fw_funcs.h"
16 #include "ecore_cxt.h"
17 #include "ecore_int.h"
18 #include "ecore_dev_api.h"
19 #include "ecore_mcp.h"
20 #include "ecore_hw.h"
21 #include "ecore_sriov.h"
22
23 /***************************************************************************
24  * Structures & Definitions
25  ***************************************************************************/
26
27 #define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
28
29 #define SPQ_BLOCK_DELAY_MAX_ITER        (10)
30 #define SPQ_BLOCK_DELAY_US              (10)
31 #define SPQ_BLOCK_SLEEP_MAX_ITER        (200)
32 #define SPQ_BLOCK_SLEEP_MS              (5)
33
34 /***************************************************************************
35  * Blocking Implementation (BLOCK/EBLOCK mode)
36  ***************************************************************************/
37 static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
38                                   union event_ring_data OSAL_UNUSED * data,
39                                   u8 fw_return_code)
40 {
41         struct ecore_spq_comp_done *comp_done;
42
43         comp_done = (struct ecore_spq_comp_done *)cookie;
44
45         comp_done->done = 0x1;
46         comp_done->fw_return_code = fw_return_code;
47
48         /* make update visible to waiting thread */
49         OSAL_SMP_WMB(p_hwfn->p_dev);
50 }
51
52 static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
53                                               struct ecore_spq_entry *p_ent,
54                                               u8 *p_fw_ret,
55                                               bool sleep_between_iter)
56 {
57         struct ecore_spq_comp_done *comp_done;
58         u32 iter_cnt;
59
60         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
61         iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
62                                       : SPQ_BLOCK_DELAY_MAX_ITER;
63 #ifndef ASIC_ONLY
64         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
65                 iter_cnt *= 5;
66 #endif
67
68         while (iter_cnt--) {
69                 OSAL_POLL_MODE_DPC(p_hwfn);
70                 OSAL_SMP_RMB(p_hwfn->p_dev);
71                 if (comp_done->done == 1) {
72                         if (p_fw_ret)
73                                 *p_fw_ret = comp_done->fw_return_code;
74                         return ECORE_SUCCESS;
75                 }
76
77                 if (sleep_between_iter)
78                         OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
79                 else
80                         OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
81         }
82
83         return ECORE_TIMEOUT;
84 }
85
86 static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
87                                             struct ecore_spq_entry *p_ent,
88                                             u8 *p_fw_ret, bool skip_quick_poll)
89 {
90         struct ecore_spq_comp_done *comp_done;
91         struct ecore_ptt *p_ptt;
92         enum _ecore_status_t rc;
93
94         /* A relatively short polling period w/o sleeping, to allow the FW to
95          * complete the ramrod and thus possibly avoid the sleeps that follow.
96          */
97         if (!skip_quick_poll) {
98                 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
99                 if (rc == ECORE_SUCCESS)
100                         return ECORE_SUCCESS;
101         }
102
103         /* Move to polling with a sleeping period between iterations */
104         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
105         if (rc == ECORE_SUCCESS)
106                 return ECORE_SUCCESS;
107
108         p_ptt = ecore_ptt_acquire(p_hwfn);
109         if (!p_ptt)
110                 return ECORE_AGAIN;
111
112         DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
113         rc = ecore_mcp_drain(p_hwfn, p_ptt);
114         ecore_ptt_release(p_hwfn, p_ptt);
115         if (rc != ECORE_SUCCESS) {
116                 DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
117                 goto err;
118         }
119
120         /* Retry after drain */
121         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
122         if (rc == ECORE_SUCCESS)
123                 return ECORE_SUCCESS;
124
125         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
126         if (comp_done->done == 1) {
127                 if (p_fw_ret)
128                         *p_fw_ret = comp_done->fw_return_code;
129                 return ECORE_SUCCESS;
130         }
131 err:
132         DP_NOTICE(p_hwfn, true,
133                   "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
134                   OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
135                   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
136                   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
137
138         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
139
140         return ECORE_BUSY;
141 }
142
143 void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
144                                  u32 spq_timeout_ms)
145 {
146         p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
147                 spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
148                 SPQ_BLOCK_SLEEP_MAX_ITER;
149 }
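/* Editor's note: a minimal usage sketch of the timeout arithmetic above,
 * with a hypothetical 1-second timeout. With SPQ_BLOCK_SLEEP_MS == 5, the
 * sleeping poll loop in __ecore_spq_block() runs timeout_ms / 5 iterations;
 * passing 0 restores SPQ_BLOCK_SLEEP_MAX_ITER (200), i.e. the same ~1000 ms.
 */
#if 0 /* illustrative sketch, not part of the driver build */
static void example_spq_timeout(struct ecore_hwfn *p_hwfn)
{
	/* 1000 ms / SPQ_BLOCK_SLEEP_MS (5 ms) -> 200 sleeping iterations */
	ecore_set_spq_block_timeout(p_hwfn, 1000);

	/* 0 selects the built-in default, also 200 iterations */
	ecore_set_spq_block_timeout(p_hwfn, 0);
}
#endif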
150
151 /***************************************************************************
152  * SPQ entries inner API
153  ***************************************************************************/
154 static enum _ecore_status_t
155 ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
156 {
157         p_ent->flags = 0;
158
159         switch (p_ent->comp_mode) {
160         case ECORE_SPQ_MODE_EBLOCK:
161         case ECORE_SPQ_MODE_BLOCK:
162                 p_ent->comp_cb.function = ecore_spq_blocking_cb;
163                 break;
164         case ECORE_SPQ_MODE_CB:
165                 break;
166         default:
167                 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
168                           p_ent->comp_mode);
169                 return ECORE_INVAL;
170         }
171
172         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
173                    "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
174                    " Data pointer: [%08x:%08x] Completion Mode: %s\n",
175                    p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
176                    p_ent->elem.hdr.protocol_id,
177                    p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
178                    D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
179                            ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
180                            "MODE_CB"));
181
182         return ECORE_SUCCESS;
183 }
184
185 /***************************************************************************
186  * HSI access
187  ***************************************************************************/
188 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
189                                     struct ecore_spq *p_spq)
190 {
191         struct e4_core_conn_context *p_cxt;
192         struct ecore_cxt_info cxt_info;
193         u16 physical_q;
194         enum _ecore_status_t rc;
195
196         cxt_info.iid = p_spq->cid;
197
198         rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
199
200         if (rc < 0) {
201                 DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
202                           p_spq->cid);
203                 return;
204         }
205
206         p_cxt = cxt_info.p_cxt;
207
208         /* @@@TBD we zero the context until we have ilt_reset implemented. */
209         OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
210
211         if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
212                 SET_FIELD(p_cxt->xstorm_ag_context.flags10,
213                           E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
214                 SET_FIELD(p_cxt->xstorm_ag_context.flags1,
215                           E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
216                 /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
217                  *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
218                  */
219                 SET_FIELD(p_cxt->xstorm_ag_context.flags9,
220                           E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
221         }
222
223         /* CDU validation - FIXME currently disabled */
224
225         /* QM physical queue */
226         physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
227         p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
228
229         p_cxt->xstorm_st_context.spq_base_lo =
230             DMA_LO_LE(p_spq->chain.p_phys_addr);
231         p_cxt->xstorm_st_context.spq_base_hi =
232             DMA_HI_LE(p_spq->chain.p_phys_addr);
233
234         DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
235                        p_hwfn->p_consq->chain.p_phys_addr);
236 }
237
238 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
239                                               struct ecore_spq *p_spq,
240                                               struct ecore_spq_entry *p_ent)
241 {
242         struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
243         struct core_db_data *p_db_data = &p_spq->db_data;
244         u16 echo = ecore_chain_get_prod_idx(p_chain);
245         struct slow_path_element *elem;
246
247         p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
248         elem = ecore_chain_produce(p_chain);
249         if (!elem) {
250                 DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
251                 return ECORE_INVAL;
252         }
253
254         *elem = p_ent->elem;    /* Struct assignment */
255
256         p_db_data->spq_prod =
257                 OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
258
259         /* Make sure the SPQE is updated before the doorbell */
260         OSAL_WMB(p_hwfn->p_dev);
261
262         DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
263
264         /* Make sure the doorbell was rung */
265         OSAL_WMB(p_hwfn->p_dev);
266
267         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
268                    "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
269                    " agg_params: %02x, prod: %04x\n",
270                    p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
271                    p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
272
273         return ECORE_SUCCESS;
274 }
275
276 /***************************************************************************
277  * Asynchronous events
278  ***************************************************************************/
279
280 static enum _ecore_status_t
281 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
282                              struct event_ring_entry *p_eqe)
283 {
284         ecore_spq_async_comp_cb cb;
285
286         if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
287                 DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
288                 return ECORE_INVAL;
289         }
290
291         cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
292         if (cb) {
293                 return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
294                           &p_eqe->data, p_eqe->fw_return_code);
295         } else {
296                 DP_NOTICE(p_hwfn,
297                           true, "Unknown Async completion for protocol: %d\n",
298                           p_eqe->protocol_id);
299                 return ECORE_INVAL;
300         }
301 }
302
303 enum _ecore_status_t
304 ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
305                             enum protocol_type protocol_id,
306                             ecore_spq_async_comp_cb cb)
307 {
308         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
309                 return ECORE_INVAL;
310
311         p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
312         return ECORE_SUCCESS;
313 }
314
315 void
316 ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
317                               enum protocol_type protocol_id)
318 {
319         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
320                 return;
321
322         p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
323 }
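/* Editor's note: a minimal usage sketch for the async-completion hooks
 * above. The callback signature is inferred from the invocation in
 * ecore_async_event_completion(); the protocol ID choice and the handler
 * body are hypothetical.
 */
#if 0 /* illustrative sketch, not part of the driver build */
static enum _ecore_status_t
example_async_cb(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo,
		 union event_ring_data *data, u8 fw_return_code)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "async event: op %02x echo %04x fwret %02x\n",
		   opcode, OSAL_LE16_TO_CPU(echo), fw_return_code);
	return ECORE_SUCCESS;
}

static void example_async_registration(struct ecore_hwfn *p_hwfn)
{
	/* Route async EQEs of this protocol to the handler above */
	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				    example_async_cb);

	/* ... and detach it again when tearing down */
	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
}
#endif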
324
325 /***************************************************************************
326  * EQ API
327  ***************************************************************************/
328 void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
329 {
330         u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
331             USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
332
333         REG_WR16(p_hwfn, addr, prod);
334
335         /* keep prod updates ordered */
336         OSAL_MMIOWB(p_hwfn->p_dev);
337 }
338
339 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
340                                          void *cookie)
341 {
342         struct ecore_eq *p_eq = cookie;
343         struct ecore_chain *p_chain = &p_eq->chain;
344         u16 fw_cons_idx             = 0;
345         enum _ecore_status_t rc = ECORE_SUCCESS;
346
347         if (!p_hwfn->p_spq) {
348                 DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
349                 return ECORE_INVAL;
350         }
351
352         /* take a snapshot of the FW consumer */
353         fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
354
355         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
356
357         /* Need to guarantee the fw_cons index we use points to a usable
358          * element (to comply with our chain), so our macros would comply
359          */
360         if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
361             ecore_chain_get_usable_per_page(p_chain)) {
362                 fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
363         }
364
365         /* Complete current segment of eq entries */
366         while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
367                 struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
368                 if (!p_eqe) {
369                         rc = ECORE_INVAL;
370                         break;
371                 }
372
373                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
374                            "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
375                            p_eqe->opcode,            /* Event Opcode */
376                            p_eqe->protocol_id,  /* Event Protocol ID */
377                            p_eqe->reserved0,    /* Reserved */
378                            /* Echo value from ramrod data on the host */
379                            OSAL_LE16_TO_CPU(p_eqe->echo),
380                            p_eqe->fw_return_code,    /* FW return code for SP
381                                                       * ramrods
382                                                       */
383                            p_eqe->flags);
384
385                 if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
386                         if (ecore_async_event_completion(p_hwfn, p_eqe))
387                                 rc = ECORE_INVAL;
388                 } else if (ecore_spq_completion(p_hwfn,
389                                                 p_eqe->echo,
390                                                 p_eqe->fw_return_code,
391                                                 &p_eqe->data)) {
392                         rc = ECORE_INVAL;
393                 }
394
395                 ecore_chain_recycle_consumed(p_chain);
396         }
397
398         ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
399
400         return rc;
401 }
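/* Editor's note: a worked example of the page-boundary fixup above, with
 * hypothetical chain geometry (3 usable + 1 unusable elements per 4-slot
 * page; the real geometry depends on the chain mode and element size).
 * If the FW consumer stops on the last usable slot of a page, e.g.
 * fw_cons_idx == 3, then (3 & 3) == 3 and the index is advanced past the
 * unusable slot to 4, the first usable element of the next page, so it
 * can be compared against ecore_chain_get_cons_idx().
 */
#if 0 /* illustrative sketch, not part of the driver build */
static u16 example_eq_cons_fixup(u16 fw_cons_idx)
{
	const u16 usable = 3;	/* hypothetical usable elements per page */
	const u16 unusable = 1;	/* hypothetical reserved slots per page */

	if ((fw_cons_idx & usable) == usable)
		fw_cons_idx += unusable;	/* 3 -> 4, 7 -> 8, ... */

	return fw_cons_idx;
}
#endif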
402
403 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
404 {
405         struct ecore_eq *p_eq;
406
407         /* Allocate EQ struct */
408         p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
409         if (!p_eq) {
410                 DP_NOTICE(p_hwfn, false,
411                           "Failed to allocate `struct ecore_eq'\n");
412                 return ECORE_NOMEM;
413         }
414
415         /* Allocate and initialize EQ chain */
416         if (ecore_chain_alloc(p_hwfn->p_dev,
417                               ECORE_CHAIN_USE_TO_PRODUCE,
418                               ECORE_CHAIN_MODE_PBL,
419                               ECORE_CHAIN_CNT_TYPE_U16,
420                               num_elem,
421                               sizeof(union event_ring_element),
422                               &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
423                 DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
424                 goto eq_allocate_fail;
425         }
426
427         /* register EQ completion on the SP SB */
428         ecore_int_register_cb(p_hwfn, ecore_eq_completion,
429                               p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
430
431         p_hwfn->p_eq = p_eq;
432         return ECORE_SUCCESS;
433
434 eq_allocate_fail:
435         OSAL_FREE(p_hwfn->p_dev, p_eq);
436         return ECORE_NOMEM;
437 }
438
439 void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
440 {
441         ecore_chain_reset(&p_hwfn->p_eq->chain);
442 }
443
444 void ecore_eq_free(struct ecore_hwfn *p_hwfn)
445 {
446         if (!p_hwfn->p_eq)
447                 return;
448
449         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
450
451         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
452         p_hwfn->p_eq = OSAL_NULL;
453 }
454
455 /***************************************************************************
456  * CQE API - manipulate EQ functionality
457  ***************************************************************************/
458 static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
459                                                  struct eth_slow_path_rx_cqe
460                                                  *cqe,
461                                                  enum protocol_type protocol)
462 {
463         if (IS_VF(p_hwfn->p_dev))
464                 return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
465
466         /* @@@tmp - it's possible we'll eventually want to handle some
467          * actual commands that can arrive here, but for now this is only
468          * used to complete the ramrod using the echo value on the cqe
469          */
470         return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
471 }
472
473 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
474                                               struct eth_slow_path_rx_cqe *cqe)
475 {
476         enum _ecore_status_t rc;
477
478         rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
479         if (rc) {
480                 DP_NOTICE(p_hwfn, true,
481                           "Failed to handle RXQ CQE [cmd 0x%02x]\n",
482                           cqe->ramrod_cmd_id);
483         }
484
485         return rc;
486 }
487
488 /***************************************************************************
489  * Slow hwfn Queue (spq)
490  ***************************************************************************/
491 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
492 {
493         struct ecore_spq *p_spq = p_hwfn->p_spq;
494         struct ecore_spq_entry *p_virt = OSAL_NULL;
495         struct core_db_data *p_db_data;
496         void OSAL_IOMEM *db_addr;
497         dma_addr_t p_phys = 0;
498         u32 i, capacity;
499         enum _ecore_status_t rc;
500
501         OSAL_LIST_INIT(&p_spq->pending);
502         OSAL_LIST_INIT(&p_spq->completion_pending);
503         OSAL_LIST_INIT(&p_spq->free_pool);
504         OSAL_LIST_INIT(&p_spq->unlimited_pending);
505         OSAL_SPIN_LOCK_INIT(&p_spq->lock);
506
507         /* Fill the SPQ free pool */
508         p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
509         p_virt = p_spq->p_virt;
510
511         capacity = ecore_chain_get_capacity(&p_spq->chain);
512         for (i = 0; i < capacity; i++) {
513                 DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
514
515                 OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
516
517                 p_virt++;
518                 p_phys += sizeof(struct ecore_spq_entry);
519         }
520
521         /* Statistics */
522         p_spq->normal_count = 0;
523         p_spq->comp_count = 0;
524         p_spq->comp_sent_count = 0;
525         p_spq->unlimited_pending_count = 0;
526
527         OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
528                       SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
529         p_spq->comp_bitmap_idx = 0;
530
531         /* SPQ cid, cannot fail */
532         ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
533         ecore_spq_hw_initialize(p_hwfn, p_spq);
534
535         /* reset the chain itself */
536         ecore_chain_reset(&p_spq->chain);
537
538         /* Initialize the address/data of the SPQ doorbell */
539         p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
540         p_db_data = &p_spq->db_data;
541         OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
542         SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
543         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
544         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
545                   DQ_XCM_CORE_SPQ_PROD_CMD);
546         p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
547
548         /* Register the SPQ doorbell with the doorbell recovery mechanism */
549         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
550         rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
551                                    DB_REC_WIDTH_32B, DB_REC_KERNEL);
552         if (rc != ECORE_SUCCESS)
553                 DP_INFO(p_hwfn,
554                         "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
555 }
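/* Editor's note: a small sketch of the free-pool layout built above. The
 * SPQ entries live in one coherent DMA block (allocated in
 * ecore_spq_alloc()); entry i starts at p_phys + i * sizeof(entry), and
 * its elem.data_ptr is pointed at the `ramrod` member embedded in that
 * same entry. The helper name below is hypothetical.
 */
#if 0 /* illustrative sketch, not part of the driver build */
static dma_addr_t example_ramrod_phys(struct ecore_spq *p_spq, u32 i)
{
	/* Matches the loop in ecore_spq_setup(): base + i entries + the
	 * offset of the embedded ramrod data within the entry.
	 */
	return p_spq->p_phys + i * sizeof(struct ecore_spq_entry) +
	       OFFSETOF(struct ecore_spq_entry, ramrod);
}
#endif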
556
557 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
558 {
559         struct ecore_spq_entry *p_virt = OSAL_NULL;
560         struct ecore_spq *p_spq = OSAL_NULL;
561         dma_addr_t p_phys = 0;
562         u32 capacity;
563
564         /* SPQ struct */
565         p_spq =
566             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
567         if (!p_spq) {
568                 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
569                 return ECORE_NOMEM;
570         }
571
572         /* SPQ ring  */
573         if (ecore_chain_alloc(p_hwfn->p_dev,
574                               ECORE_CHAIN_USE_TO_PRODUCE,
575                               ECORE_CHAIN_MODE_SINGLE,
576                               ECORE_CHAIN_CNT_TYPE_U16,
577                               0, /* N/A when the mode is SINGLE */
578                               sizeof(struct slow_path_element),
579                               &p_spq->chain, OSAL_NULL)) {
580                 DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
581                 goto spq_allocate_fail;
582         }
583
584         /* allocate and fill the SPQ elements (incl. ramrod data list) */
585         capacity = ecore_chain_get_capacity(&p_spq->chain);
586         p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
587                                          capacity *
588                                          sizeof(struct ecore_spq_entry));
589         if (!p_virt)
590                 goto spq_allocate_fail;
591
592         p_spq->p_virt = p_virt;
593         p_spq->p_phys = p_phys;
594
595 #ifdef CONFIG_ECORE_LOCK_ALLOC
596         if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
597                 goto spq_allocate_fail;
598 #endif
599
600         p_hwfn->p_spq = p_spq;
601         return ECORE_SUCCESS;
602
603 spq_allocate_fail:
604         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
605         OSAL_FREE(p_hwfn->p_dev, p_spq);
606         return ECORE_NOMEM;
607 }
608
609 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
610 {
611         struct ecore_spq *p_spq = p_hwfn->p_spq;
612         void OSAL_IOMEM *db_addr;
613         u32 capacity;
614
615         if (!p_spq)
616                 return;
617
618         /* Delete the SPQ doorbell from the doorbell recovery mechanism */
619         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
620         ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
621
622         if (p_spq->p_virt) {
623                 capacity = ecore_chain_get_capacity(&p_spq->chain);
624                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
625                                        p_spq->p_virt,
626                                        p_spq->p_phys,
627                                        capacity *
628                                        sizeof(struct ecore_spq_entry));
629         }
630
631         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
632 #ifdef CONFIG_ECORE_LOCK_ALLOC
633         OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
634 #endif
635
636         OSAL_FREE(p_hwfn->p_dev, p_spq);
637 }
638
639 enum _ecore_status_t
640 ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
641 {
642         struct ecore_spq *p_spq = p_hwfn->p_spq;
643         struct ecore_spq_entry *p_ent = OSAL_NULL;
644         enum _ecore_status_t rc = ECORE_SUCCESS;
645
646         OSAL_SPIN_LOCK(&p_spq->lock);
647
648         if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
649                 p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
650                 if (!p_ent) {
651                         DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
652                         rc = ECORE_NOMEM;
653                         goto out_unlock;
654                 }
655                 p_ent->queue = &p_spq->unlimited_pending;
656         } else {
657                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
658                                               struct ecore_spq_entry, list);
659                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
660                 p_ent->queue = &p_spq->pending;
661         }
662
663         *pp_ent = p_ent;
664
665 out_unlock:
666         OSAL_SPIN_UNLOCK(&p_spq->lock);
667         return rc;
668 }
669
670 /* Locked variant; Should be called while the SPQ lock is taken */
671 static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
672                                      struct ecore_spq_entry *p_ent)
673 {
674         OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
675 }
676
677 void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
678                             struct ecore_spq_entry *p_ent)
679 {
680         OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
681         __ecore_spq_return_entry(p_hwfn, p_ent);
682         OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
683 }
684
685 /**
686  * @brief ecore_spq_add_entry - adds a new entry to the pending
687  *        list. Should be called while the SPQ lock is held.
688  *
689  * Adds an entry to the pending list if there is room (an empty
690  * element is available in the free_pool), or else places the
691  * entry in the unlimited_pending pool.
692  *
693  * @param p_hwfn
694  * @param p_ent
695  * @param priority
696  *
697  * @return enum _ecore_status_t
698  */
699 static enum _ecore_status_t
700 ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
701                     struct ecore_spq_entry *p_ent, enum spq_priority priority)
702 {
703         struct ecore_spq *p_spq = p_hwfn->p_spq;
704
705         if (p_ent->queue == &p_spq->unlimited_pending) {
706                 if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
707                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
708                                             &p_spq->unlimited_pending);
709                         p_spq->unlimited_pending_count++;
710
711                         return ECORE_SUCCESS;
712
713                 } else {
714                         struct ecore_spq_entry *p_en2;
715
716                         p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
717                                                      struct ecore_spq_entry,
718                                                      list);
719                         OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
720
721                         /* Copy the ring element physical pointer to the new
722                          * entry, since we are about to override the entire ring
723                          * entry and don't want to lose the pointer.
724                          */
725                         p_ent->elem.data_ptr = p_en2->elem.data_ptr;
726
727                         *p_en2 = *p_ent;
728
729                         /* EBLOCK is responsible for freeing the allocated p_ent */
730                         if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
731                                 OSAL_FREE(p_hwfn->p_dev, p_ent);
732
733                         p_ent = p_en2;
734                 }
735         }
736
737         /* entry is to be placed in 'pending' queue */
738         switch (priority) {
739         case ECORE_SPQ_PRIORITY_NORMAL:
740                 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
741                 p_spq->normal_count++;
742                 break;
743         case ECORE_SPQ_PRIORITY_HIGH:
744                 OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
745                 p_spq->high_count++;
746                 break;
747         default:
748                 return ECORE_INVAL;
749         }
750
751         return ECORE_SUCCESS;
752 }
753
754 /***************************************************************************
755  * Accessor
756  ***************************************************************************/
757
758 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
759 {
760         if (!p_hwfn->p_spq)
761                 return 0xffffffff;      /* illegal */
762         return p_hwfn->p_spq->cid;
763 }
764
765 /***************************************************************************
766  * Posting new Ramrods
767  ***************************************************************************/
768
769 static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
770                                                 osal_list_t *head,
771                                                 u32 keep_reserve)
772 {
773         struct ecore_spq *p_spq = p_hwfn->p_spq;
774         enum _ecore_status_t rc;
775
776         /* TODO - implementation might be wasteful; will always keep room
777          * for an additional high priority ramrod (even if one is already
778          * pending in FW)
779          */
780         while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
781                !OSAL_LIST_IS_EMPTY(head)) {
782                 struct ecore_spq_entry *p_ent =
783                     OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
784                 if (p_ent != OSAL_NULL) {
785 #if defined(_NTDDK_)
786 #pragma warning(suppress : 6011 28182)
787 #endif
788                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
789                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
790                                             &p_spq->completion_pending);
791                         p_spq->comp_sent_count++;
792
793                         rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
794                         if (rc) {
795                                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
796                                                     &p_spq->completion_pending);
797                                 __ecore_spq_return_entry(p_hwfn, p_ent);
798                                 return rc;
799                         }
800                 }
801         }
802
803         return ECORE_SUCCESS;
804 }
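/* Editor's note: a worked example of the keep_reserve logic above, with
 * hypothetical numbers. With a reserve of SPQ_HIGH_PRI_RESERVE_DEFAULT
 * (1), a chain with 2 free element slots posts exactly one pending entry:
 * the loop requires elem_left > keep_reserve, so the last slot stays
 * reserved for a future high-priority ramrod.
 */
#if 0 /* illustrative sketch, not part of the driver build */
static bool example_may_post(u16 elem_left)
{
	/* elem_left 2 -> post, elem_left 1 -> hold the reserved slot */
	return elem_left > SPQ_HIGH_PRI_RESERVE_DEFAULT;
}
#endif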
805
806 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
807 {
808         struct ecore_spq *p_spq = p_hwfn->p_spq;
809         struct ecore_spq_entry *p_ent = OSAL_NULL;
810
811         while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
812                 if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
813                         break;
814
815                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
816                                               struct ecore_spq_entry, list);
817                 if (!p_ent)
818                         return ECORE_INVAL;
819
820 #if defined(_NTDDK_)
821 #pragma warning(suppress : 6011)
822 #endif
823                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
824
825                 ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
826         }
827
828         return ecore_spq_post_list(p_hwfn,
829                                  &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
830 }
831
832 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
833                                     struct ecore_spq_entry *p_ent,
834                                     u8 *fw_return_code)
835 {
836         enum _ecore_status_t rc = ECORE_SUCCESS;
837         struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
838         bool b_ret_ent = true;
839
840         if (!p_hwfn)
841                 return ECORE_INVAL;
842
843         if (!p_ent) {
844                 DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
845                 return ECORE_INVAL;
846         }
847
848         if (p_hwfn->p_dev->recov_in_prog) {
849                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
850                            "Recovery is in progress -> skip spq post"
851                            " [cmd %02x protocol %02x]\n",
852                            p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
853                 /* Return success to let the flows complete successfully
854                  * w/o any error handling.
855                  */
856                 return ECORE_SUCCESS;
857         }
858
859         OSAL_SPIN_LOCK(&p_spq->lock);
860
861         /* Complete the entry */
862         rc = ecore_spq_fill_entry(p_hwfn, p_ent);
863
864         /* Check return value after LOCK is taken for cleaner error flow */
865         if (rc)
866                 goto spq_post_fail;
867
868         /* Add the request to the pending queue */
869         rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
870         if (rc)
871                 goto spq_post_fail;
872
873         rc = ecore_spq_pend_post(p_hwfn);
874         if (rc) {
875                 /* Since it's possible that the pending post failed for a
876                  * different entry [although unlikely], the failed entry was
877                  * already dealt with; no need to return it here.
878                  */
879                 b_ret_ent = false;
880                 goto spq_post_fail;
881         }
882
883         OSAL_SPIN_UNLOCK(&p_spq->lock);
884
885         if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
886                 /* For entries in ECORE BLOCK mode, the completion code cannot
887                  * perform the necessary cleanup - if it did, we couldn't
888                  * access p_ent here to see whether it's successful or not.
889                  * Thus, after gaining the answer perform the cleanup here.
890                  */
891                 rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
892                                      p_ent->queue == &p_spq->unlimited_pending);
893
894                 if (p_ent->queue == &p_spq->unlimited_pending) {
895                         /* This is an allocated p_ent which does not need to
896                          * return to pool.
897                          */
898                         OSAL_FREE(p_hwfn->p_dev, p_ent);
899
900                         /* TBD: handle error flow and remove p_ent from
901                          * completion pending
902                          */
903                         return rc;
904                 }
905
906                 if (rc)
907                         goto spq_post_fail2;
908
909                 /* return to pool */
910                 ecore_spq_return_entry(p_hwfn, p_ent);
911         }
912         return rc;
913
914 spq_post_fail2:
915         OSAL_SPIN_LOCK(&p_spq->lock);
916         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
917         ecore_chain_return_produced(&p_spq->chain);
918
919 spq_post_fail:
920         /* return to the free pool */
921         if (b_ret_ent)
922                 __ecore_spq_return_entry(p_hwfn, p_ent);
923         OSAL_SPIN_UNLOCK(&p_spq->lock);
924
925         return rc;
926 }
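/* Editor's note: a minimal sketch of the ramrod lifecycle this file
 * implements: acquire an entry, fill the slow-path element header, and
 * post it. The cmd_id value is hypothetical and the field names follow
 * ecore_spq.h; real callers go through the ecore_sp_* wrappers, which
 * also initialize the ramrod data itself.
 */
#if 0 /* illustrative sketch, not part of the driver build */
static enum _ecore_status_t example_post_ramrod(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u8 fw_ret = 0;
	enum _ecore_status_t rc;

	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(ecore_spq_get_cid(p_hwfn));
	p_ent->elem.hdr.cmd_id = 0; /* hypothetical command */
	p_ent->elem.hdr.protocol_id = PROTOCOLID_COMMON;
	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
	p_ent->comp_cb.cookie = &p_ent->comp_done;
	p_ent->comp_done.done = 0;

	/* EBLOCK: blocks until the EQE arrives, then returns the entry
	 * to the free pool (see ecore_spq_block() above).
	 */
	return ecore_spq_post(p_hwfn, p_ent, &fw_ret);
}
#endif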
927
928 enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
929                                           __le16 echo,
930                                           u8 fw_return_code,
931                                           union event_ring_data *p_data)
932 {
933         struct ecore_spq *p_spq;
934         struct ecore_spq_entry *p_ent = OSAL_NULL;
935         struct ecore_spq_entry *tmp;
936         struct ecore_spq_entry *found = OSAL_NULL;
937         enum _ecore_status_t rc;
938
939         if (!p_hwfn)
940                 return ECORE_INVAL;
941
942         p_spq = p_hwfn->p_spq;
943         if (!p_spq)
944                 return ECORE_INVAL;
945
946         OSAL_SPIN_LOCK(&p_spq->lock);
947         OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
948                                       tmp,
949                                       &p_spq->completion_pending,
950                                       list, struct ecore_spq_entry) {
951                 if (p_ent->elem.hdr.echo == echo) {
952                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
953                                                &p_spq->completion_pending);
954
955                         /* Avoid overriding of SPQ entries when getting
956                          * out-of-order completions, by marking the completions
957                          * in a bitmap and increasing the chain consumer only
958                          * for the first successive completed entries.
959                          */
960                         SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
961                         while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
962                                                       p_spq->comp_bitmap_idx)) {
963                                 SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
964                                                         p_spq->comp_bitmap_idx);
965                                 p_spq->comp_bitmap_idx++;
966                                 ecore_chain_return_produced(&p_spq->chain);
967                         }
968
969                         p_spq->comp_count++;
970                         found = p_ent;
971                         break;
972                 }
973
974                 /* This is debug and should be relatively uncommon - depends
975                  * on scenarios which have multiple per-PF sent ramrods.
976                  */
977                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
978                            "Got completion for echo %04x - doesn't match"
979                            " echo %04x in completion pending list\n",
980                            OSAL_LE16_TO_CPU(echo),
981                            OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
982         }
983
984         /* Release lock before callback, as callback may post
985          * an additional ramrod.
986          */
987         OSAL_SPIN_UNLOCK(&p_spq->lock);
988
989         if (!found) {
990                 DP_NOTICE(p_hwfn, true,
991                           "Failed to find an entry this"
992                           " EQE [echo %04x] completes\n",
993                           OSAL_LE16_TO_CPU(echo));
994                 return ECORE_EXISTS;
995         }
996
997         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
998                    "Complete EQE [echo %04x]: func %p cookie %p)\n",
999                    OSAL_LE16_TO_CPU(echo),
1000                    p_ent->comp_cb.function, p_ent->comp_cb.cookie);
1001         if (found->comp_cb.function)
1002                 found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
1003                                         fw_return_code);
1004         else
1005                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1006                            "Got a completion without a callback function\n");
1007
1008         if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
1009             (found->queue == &p_spq->unlimited_pending))
1010                 /* EBLOCK  is responsible for returning its own entry into the
1011                  * free list, unless it originally added the entry into the
1012                  * unlimited pending list.
1013                  */
1014                 ecore_spq_return_entry(p_hwfn, found);
1015
1016         /* Attempt to post pending requests */
1017         OSAL_SPIN_LOCK(&p_spq->lock);
1018         rc = ecore_spq_pend_post(p_hwfn);
1019         OSAL_SPIN_UNLOCK(&p_spq->lock);
1020
1021         return rc;
1022 }
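/* Editor's note: a worked example of the out-of-order completion handling
 * above, with hypothetical echo values. Suppose ramrods with echo 0, 1
 * and 2 are in flight and their EQEs arrive in the order 1, 0, 2:
 *  - echo 1: its bit is set, but bit 0 (== comp_bitmap_idx) is still
 *    clear, so the chain consumer is not advanced;
 *  - echo 0: bits 0 and 1 are now successive, so both are cleared,
 *    comp_bitmap_idx advances to 2, and two produced elements are
 *    returned to the chain;
 *  - echo 2: set and immediately consumed as well.
 * The helper below just extracts that in-place logic for readability.
 */
#if 0 /* illustrative sketch, not part of the driver build */
static void example_ooo_completion(struct ecore_spq *p_spq, u16 echo)
{
	SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
	while (SPQ_COMP_BMAP_TEST_BIT(p_spq, p_spq->comp_bitmap_idx)) {
		SPQ_COMP_BMAP_CLEAR_BIT(p_spq, p_spq->comp_bitmap_idx);
		p_spq->comp_bitmap_idx++;
		ecore_chain_return_produced(&p_spq->chain);
	}
}
#endif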
1023
1024 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1025 {
1026         struct ecore_consq *p_consq;
1027
1028         /* Allocate ConsQ struct */
1029         p_consq =
1030             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1031         if (!p_consq) {
1032                 DP_NOTICE(p_hwfn, false,
1033                           "Failed to allocate `struct ecore_consq'\n");
1034                 return ECORE_NOMEM;
1035         }
1036
1037         /* Allocate and initialize the ConsQ chain */
1038         if (ecore_chain_alloc(p_hwfn->p_dev,
1039                               ECORE_CHAIN_USE_TO_PRODUCE,
1040                               ECORE_CHAIN_MODE_PBL,
1041                               ECORE_CHAIN_CNT_TYPE_U16,
1042                               ECORE_CHAIN_PAGE_SIZE / 0x80,
1043                               0x80,
1044                               &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1045                 DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
1046                 goto consq_allocate_fail;
1047         }
1048
1049         p_hwfn->p_consq = p_consq;
1050         return ECORE_SUCCESS;
1051
1052 consq_allocate_fail:
1053         OSAL_FREE(p_hwfn->p_dev, p_consq);
1054         return ECORE_NOMEM;
1055 }
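/* Editor's note: the 0x80 constants above size the ConsQ as one page of
 * 128-byte elements. A small sanity sketch, assuming a 4 KiB
 * ECORE_CHAIN_PAGE_SIZE: 4096 / 0x80 = 32 elements per page.
 */
#if 0 /* illustrative sketch, not part of the driver build */
static u32 example_consq_elems(void)
{
	/* 4096 / 128 == 32 under the 4 KiB page assumption */
	return ECORE_CHAIN_PAGE_SIZE / 0x80;
}
#endif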
1056
1057 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1058 {
1059         ecore_chain_reset(&p_hwfn->p_consq->chain);
1060 }
1061
1062 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1063 {
1064         if (!p_hwfn->p_consq)
1065                 return;
1066
1067         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1068         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
1069 }