/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (200)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
                                  union event_ring_data OSAL_UNUSED * data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent,
                                              u8 *p_fw_ret,
                                              bool sleep_between_iter)
{
        struct ecore_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
                                      : SPQ_BLOCK_DELAY_MAX_ITER;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
                iter_cnt *= 5;
#endif

        while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }

                if (sleep_between_iter)
                        OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
                else
                        OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
        }

        return ECORE_TIMEOUT;
}

static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret, bool skip_quick_poll)
{
        struct ecore_spq_comp_done *comp_done;
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (rc == ECORE_SUCCESS)
                        return ECORE_SUCCESS;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_AGAIN;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_ptt);
        ecore_ptt_release(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }
err:
        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}

void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
                                 u32 spq_timeout_ms)
{
        p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
                spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
                SPQ_BLOCK_SLEEP_MAX_ITER;
}
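
/* Worked example of the polling budget above (illustration only, derived from
 * the SPQ_BLOCK_* constants): the quick poll busy-waits for at most
 * SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10us ~= 100us, while the
 * sleeping poll waits up to SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS =
 * 200 * 5ms = 1000ms by default. A call such as
 * ecore_set_spq_block_timeout(p_hwfn, 2500) would therefore allow
 * 2500 / 5 = 500 sleep iterations (and emulation builds multiply this by 5).
 */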

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
                   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
{
        struct e4_core_conn_context *p_cxt;
        struct ecore_cxt_info cxt_info;
        u16 physical_q;
        enum _ecore_status_t rc;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        /* @@@TBD we zero the context until we have ilt_reset implemented. */
        OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

        if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
                SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
                SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
                /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                 *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
                 */
                SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                          E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
        }

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq *p_spq,
                                              struct ecore_spq_entry *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        struct core_db_data *p_db_data = &p_spq->db_data;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;

        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem;    /* Struct assignment */

        p_db_data->spq_prod =
                OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* Make sure the SPQE is updated before the doorbell */
        OSAL_WMB(p_hwfn->p_dev);

        DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

        /* Make sure the doorbell was rung */
        OSAL_WMB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
                   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
                   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}
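
/* Note on the doorbell above (illustration, not driver logic): db_data is
 * written to the doorbell BAR as a single 32-bit value, so the destination/agg
 * command parameters and the new producer index reach the XCM together
 * (assuming the usual core_db_data layout of u8 params, u8 agg_flags and a
 * __le16 spq_prod, as configured in ecore_spq_setup() below). The first
 * OSAL_WMB() guarantees the SPQE copy is visible before the doorbell write;
 * the second flushes the doorbell write itself before the function returns.
 */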

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        ecore_spq_async_comp_cb cb;

        if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
                return ECORE_INVAL;

        cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
        if (cb) {
                return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
                          &p_eqe->data, p_eqe->fw_return_code);
        } else {
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
}

enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
                            enum protocol_type protocol_id,
                            ecore_spq_async_comp_cb cb)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return ECORE_INVAL;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
        return ECORE_SUCCESS;
}

void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
                              enum protocol_type protocol_id)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}
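
/* Usage sketch (hypothetical caller, illustration only): a protocol module
 * registers its handler at init time and removes it on teardown. The callback
 * prototype follows ecore_spq_async_comp_cb as invoked from
 * ecore_async_event_completion() above; 'my_proto_async_cb' is a made-up name.
 *
 *     static enum _ecore_status_t
 *     my_proto_async_cb(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo,
 *                       union event_ring_data *data, u8 fw_return_code)
 *     {
 *             // dispatch on 'opcode' for this protocol's async EQEs
 *             return ECORE_SUCCESS;
 *     }
 *
 *     ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ETH, my_proto_async_cb);
 *     ...
 *     ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ETH);
 */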

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                         void *cookie)
{
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        enum _ecore_status_t rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee that the fw_cons index we use points to a usable
         * element (one that complies with our chain), so that our macros work
         * correctly
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        rc = ECORE_INVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,       /* Event Opcode */
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
                           /* Echo value from ramrod data on the host */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,    /* FW return code for SP
                                                      * ramrods
                                                      */
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (ecore_async_event_completion(p_hwfn, p_eqe))
                                rc = ECORE_INVAL;
                } else if (ecore_spq_completion(p_hwfn,
                                                p_eqe->echo,
                                                p_eqe->fw_return_code,
                                                &p_eqe->data)) {
                        rc = ECORE_INVAL;
                }

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_eq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element),
                              &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn, ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return ECORE_SUCCESS;

eq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_eq);
        return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
        p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe
                                                 *cqe,
                                                 enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->p_dev))
                return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct core_db_data *p_db_data;
        void OSAL_IOMEM *db_addr;
        dma_addr_t p_phys = 0;
        u32 i, capacity;
        enum _ecore_status_t rc;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);

        /* Initialize the address/data of the SPQ doorbell */
        p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
        p_db_data = &p_spq->db_data;
        OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
        SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* Register the SPQ doorbell with the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
                                   DB_REC_WIDTH_32B, DB_REC_KERNEL);
        if (rc != ECORE_SUCCESS)
                DP_INFO(p_hwfn,
                        "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
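
/* Layout note (illustration): the loop above pre-points each ring element's
 * data_ptr at the 'ramrod' member of its own ecore_spq_entry, i.e. for
 * entry i:
 *
 *     data_ptr = p_spq->p_phys
 *              + i * sizeof(struct ecore_spq_entry)
 *              + OFFSETOF(struct ecore_spq_entry, ramrod)
 *
 * so a caller only has to fill the entry's ramrod data for the FW to see it;
 * no per-post DMA mapping is needed.
 */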

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }

        /* SPQ ring */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_SINGLE,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              0, /* N/A when the mode is SINGLE */
                              sizeof(struct slow_path_element),
                              &p_spq->chain, OSAL_NULL)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
#endif

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        void OSAL_IOMEM *db_addr;
        u32 capacity;

        if (!p_spq)
                return;

        /* Delete the SPQ doorbell from the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

        OSAL_FREE(p_hwfn->p_dev, p_spq);
}

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate an SPQ entry for a pending"
                                  " ramrod\n");
                        rc = ECORE_NOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        OSAL_SPIN_UNLOCK(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is held.
 *
 * Adds the entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;

                } else {
                        struct ecore_spq_entry *p_en2;

                        p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                                      struct ecore_spq_entry,
                                                      list);
                        OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated
                         * p_ent
                         */
                        if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
                                OSAL_FREE(p_hwfn->p_dev, p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}
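
/* Flow example (illustration): with a ring of N entries, posting N + 2 ramrods
 * back-to-back gives the first N callers free_pool entries that go straight to
 * the 'pending' list, while the last two are GFP_ATOMIC-allocated by
 * ecore_spq_get_entry() and parked on 'unlimited_pending'. As completions
 * return entries to the free_pool, ecore_spq_pend_post() below re-submits the
 * parked requests through this function, which copies each into a freed entry
 * (preserving elem.data_ptr, as done above).
 */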

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t *head,
                                                u32 keep_reserve)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high priority ramrod (even if one is already
         * pending in FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->completion_pending);
                        p_spq->comp_sent_count++;

                        rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                        if (rc) {
                                OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                                    &p_spq->completion_pending);
                                __ecore_spq_return_entry(p_hwfn, p_ent);
                                return rc;
                        }
                }
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry, list);
                if (!p_ent)
                        return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return ecore_spq_post_list(p_hwfn,
                                   &p_spq->pending,
                                   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq_entry *p_ent,
                                    u8 *fw_return_code)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
                           " [cmd %02x protocol %02x]\n",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flows complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
                                     p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        OSAL_FREE(p_hwfn->p_dev, p_ent);

                        /* TBD: handle error flow and remove p_ent from
                         * completion pending
                         */
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}
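
/* Usage sketch (hypothetical, illustration only - the real ramrod init helpers
 * live in ecore_sp_commands.c): a typical EBLOCK-mode post looks roughly like
 *
 *     struct ecore_spq_entry *p_ent = OSAL_NULL;
 *     u8 fw_ret;
 *
 *     rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *     if (rc != ECORE_SUCCESS)
 *             return rc;
 *     // fill p_ent->elem.hdr (cid/cmd_id/protocol_id), the ramrod data,
 *     // p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK and p_ent->comp_cb.cookie
 *     rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 *
 * In EBLOCK mode ecore_spq_post() blocks via ecore_spq_block() until the EQE
 * arrives (or the MCP drain gives up) and returns/frees the entry itself, so
 * the caller must not touch p_ent afterwards.
 */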

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq *p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_spq_entry *tmp;
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!p_hwfn)
                return ECORE_INVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return ECORE_INVAL;

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list, struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match"
                           " echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this"
                          " EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   OSAL_LE16_TO_CPU(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                ecore_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}
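
/* Worked example of the out-of-order handling above (illustration): assume
 * comp_bitmap_idx == 3 and completions arrive for echo 5, then 3, then 4:
 *   - echo 5: its bit is set, but bit 3 is still clear -> chain not advanced.
 *   - echo 3: bit 3 is set and immediately consumed -> chain advanced once,
 *             comp_bitmap_idx becomes 4; bit 4 is clear, so the loop stops.
 *   - echo 4: bits 4 and 5 are now both set -> chain advanced twice, idx = 6.
 * ecore_chain_return_produced() is thus only called for a contiguous run of
 * completed slots, so a ring element whose ramrod is still in flight is never
 * handed back to the producer.
 */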

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_consq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize the ConsQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
                              0x80,
                              &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        p_hwfn->p_consq = p_consq;
        return ECORE_SUCCESS;

consq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_consq);
        return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}