/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)
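
/* Worst-case wait budgets implied by the above (rough figures; the actual
 * delays depend on OSAL timer granularity):
 *   quick poll: SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10us ~= 100us
 *   sleep poll: SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = 1000 * 5ms ~= 5s
 */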

/***************************************************************************
 * Blocking Implementation (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
                                  void *cookie,
                                  union event_ring_data *data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent,
                                              u8 *p_fw_ret,
                                              bool sleep_between_iter)
{
        struct ecore_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;

        while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }

                if (sleep_between_iter)
                        OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
                else
                        OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
        }

        return ECORE_TIMEOUT;
}

static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret, bool skip_quick_poll)
{
        struct ecore_spq_comp_done *comp_done;
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (rc == ECORE_SUCCESS)
                        return ECORE_SUCCESS;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_AGAIN;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_ptt);
        ecore_ptt_release(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }
err:
        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
                   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
{
        struct ecore_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        enum _ecore_status_t rc;
        u16 physical_q;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        /* @@@TBD we zero the context until we have ilt_reset implemented. */
        OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

        if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
                SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
                SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
                /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                 *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
                 */
                SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                          E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
        }

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq *p_spq,
                                              struct ecore_spq_entry *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem;    /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        OSAL_MEMSET(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        OSAL_WMB(p_hwfn->p_dev);

        DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
                 *(u32 *)&db);

        /* make sure the doorbell has been rung */
        OSAL_WMB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
                   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
                   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}
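
/* Note: the doorbell record above is written with a single 32-bit store
 * (the *(u32 *)&db cast), so struct core_db_data is assumed to pack into
 * exactly four bytes: params (1B), agg_flags (1B) and the little-endian
 * 16-bit SPQ producer.
 */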

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return ecore_sriov_eqe_event(p_hwfn,
                                             p_eqe->opcode,
                                             p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn, true,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                         void *cookie)
{
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee that the fw_cons index we use points to a usable
         * element of the chain, so the chain macros behave correctly.
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }
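
        /* Illustrative example: in a NEXT_PTR chain with, say, eight
         * elements per page of which the last holds the next-page pointer,
         * usable_per_page is 7 (0b111); a fw_cons_idx whose low bits are all
         * set points at that reserved element and is bumped past it by
         * unusable_per_page (1). For the PBL chain used for the EQ,
         * unusable_per_page is expected to be 0, making the adjustment a
         * no-op that is kept for generality.
         */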

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        rc = ECORE_INVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,       /* Event Opcode */
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
                           /* Echo value from ramrod data on the host */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,    /* FW return code for SP
                                                      * ramrods
                                                      */
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (ecore_async_event_completion(p_hwfn, p_eqe))
                                rc = ECORE_INVAL;
                } else if (ecore_spq_completion(p_hwfn,
                                                p_eqe->echo,
                                                p_eqe->fw_return_code,
                                                &p_eqe->data)) {
                        rc = ECORE_INVAL;
                }

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_eq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element),
                              &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn, ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return ECORE_SUCCESS;

eq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_eq);
        return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
        p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe *cqe,
                                                 enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->p_dev))
                return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

        /* Fill the SPQ free pool */
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }

        /* SPQ ring */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_SINGLE,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              0, /* N/A when the mode is SINGLE */
                              sizeof(struct slow_path_element),
                              &p_spq->chain, OSAL_NULL)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
}

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate an SPQ entry for a pending"
                                  " ramrod\n");
                        rc = ECORE_NOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        OSAL_SPIN_UNLOCK(&p_spq->lock);
        return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be called while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;
                } else {
                        struct ecore_spq_entry *p_en2;

                        p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                                      struct ecore_spq_entry,
                                                      list);
                        OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated
                         * p_ent.
                         */
                        if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
                                OSAL_FREE(p_hwfn->p_dev, p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}
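
/* Entry life cycle in brief (descriptive): an entry taken from the free
 * pool goes straight onto 'pending'; an entry that was allocated because
 * the free pool was empty waits on 'unlimited_pending' and, once a
 * free-pool entry becomes available, has its contents copied into that
 * recycled entry (keeping only the recycled ring element's data_ptr)
 * before being queued as 'pending'.
 */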

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t *head,
                                                u32 keep_reserve)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high priority ramrod (even if one is already
         * pending in FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->completion_pending);
                        p_spq->comp_sent_count++;

                        rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                        if (rc) {
                                OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                                    &p_spq->completion_pending);
                                __ecore_spq_return_entry(p_hwfn, p_ent);
                                return rc;
                        }
                }
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry, list);
                if (!p_ent)
                        return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return ecore_spq_post_list(p_hwfn, &p_spq->pending,
                                   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq_entry *p_ent,
                                    u8 *fw_return_code)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
                           " [cmd %02x protocol %02x]\n",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flow complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; no need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
                                     p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        OSAL_FREE(p_hwfn->p_dev, p_ent);

                        /* TBD: handle error flow and remove p_ent from
                         * completion pending
                         */
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}
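
/* Minimal usage sketch (hypothetical caller shown for illustration; real
 * callers go through the ecore_sp_* request-init helpers, which fill the
 * ramrod header, wire comp_cb.cookie to the entry's comp_done and set up
 * the protocol-specific ramrod data):
 *
 *      struct ecore_spq_entry *p_ent = OSAL_NULL;
 *      u8 fw_ret;
 *
 *      if (ecore_spq_get_entry(p_hwfn, &p_ent) != ECORE_SUCCESS)
 *              return ECORE_NOMEM;
 *
 *      p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(ecore_spq_get_cid(p_hwfn));
 *      p_ent->elem.hdr.cmd_id = cmd_id;             <- caller-supplied
 *      p_ent->elem.hdr.protocol_id = protocol_id;   <- caller-supplied
 *      p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
 *      p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *      p_ent->comp_cb.cookie = &p_ent->comp_done;
 *
 *      rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 */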

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq *p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_spq_entry *tmp;
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!p_hwfn)
                return ECORE_INVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return ECORE_INVAL;

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list, struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match"
                           " echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this"
                          " EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   OSAL_LE16_TO_CPU(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                ecore_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}
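
/* Worked example (illustrative): with ramrods echo 0..2 outstanding and
 * comp_bitmap_idx == 0, a completion for echo 2 only sets bit 2 (bit 0 is
 * still clear, so no chain element is returned yet). A completion for
 * echo 0 then sets bit 0 and the loop above returns one element, leaving
 * comp_bitmap_idx == 1. The final completion for echo 1 sets bit 1 and the
 * loop returns the remaining two elements (bits 1 and 2), advancing
 * comp_bitmap_idx to 3.
 */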

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_consq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize ConsQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
                              0x80,
                              &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        p_hwfn->p_consq = p_consq;
        return ECORE_SUCCESS;

consq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_consq);
        return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}