qede: add base driver
drivers/net/qede/base/ecore_spq.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"

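/* This file implements the slow path queue (SPQ) through which ramrods
 * (slow path commands) are posted to the firmware, the event queue (EQ)
 * on which their completions arrive, and the ConsQ (consolidation queue)
 * whose base address is programmed into the SPQ connection context below.
 */
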
/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
                                  void *cookie,
                                  union event_ring_data *data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

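/* Busy-wait for the completion flag set by ecore_spq_blocking_cb. Each
 * iteration sleeps 5 ms, so SPQ_BLOCK_SLEEP_LENGTH (1000) iterations give
 * roughly a 5 second budget; after that an MCP drain is requested and the
 * wait is retried once before the ramrod is declared stuck.
 */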
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct ecore_spq_comp_done *comp_done;
        enum _ecore_status_t rc;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                /* validate we receive completion update */
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }
                OSAL_MSLEEP(5);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* validate we receive completion update */
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }
                OSAL_MSLEEP(5);
                sleep_count--;
        }

        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }

        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
                   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
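/* Program the XSTORM core connection context for the SPQ connection: enable
 * the DQ and consolidation completion flags, set the physical queue obtained
 * for PROTOCOLID_CORE with LB_TC, and point the context at the SPQ and ConsQ
 * chain base addresses.
 */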
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
{
        u16 pq;
        struct ecore_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        union ecore_qm_pq_params pq_params;
        enum _ecore_status_t rc;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
         *           XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
         */
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);

        p_cxt->xstorm_st_context.consolid_base_addr.lo =
            DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.consolid_base_addr.hi =
            DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}

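/* Copy a filled SPQ entry into the next slot of the SPQ ring and ring the
 * XCM doorbell with the new producer value. The entry's echo field is set to
 * the producer index, which is how the EQ completion is later matched back
 * to this entry in ecore_spq_completion().
 */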
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq *p_spq,
                                              struct ecore_spq_entry *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem;    /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        OSAL_MEMSET(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* validate producer is up to date */
        OSAL_RMB(p_hwfn->p_dev);

        db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* do not reorder */
        OSAL_BARRIER(p_hwfn->p_dev);

        DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure doorbell is rung */
        OSAL_MMIOWB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
                   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
                   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return ECORE_SUCCESS;
        default:
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
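/* Publish the given index at the USTORM EQE consumer offset for this PF;
 * ecore_eq_completion() calls this after recycling the processed entries.
 */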
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                         void *cookie)
{
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        enum _ecore_status_t rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        rc = ECORE_INVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x "
                           "fwret %x flags %x\n", p_eqe->opcode,
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,       /* FW return code for SP
                                                         * ramrods
                                                         */
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (ecore_async_event_completion(p_hwfn, p_eqe))
                                rc = ECORE_INVAL;
                } else if (ecore_spq_completion(p_hwfn,
                                                p_eqe->echo,
                                                p_eqe->fw_return_code,
                                                &p_eqe->data)) {
                        rc = ECORE_INVAL;
                }

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        return rc;
}

struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_eq'\n");
                return OSAL_NULL;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element), &p_eq->chain)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn,
                              ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        ecore_eq_free(p_hwfn, p_eq);
        return OSAL_NULL;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
        ecore_chain_reset(&p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
        if (!p_eq)
                return;
        ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe
                                                 *cqe,
                                                 enum protocol_type protocol)
{
        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
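/* ecore_spq_alloc() allocates the SPQ struct, the SPQ ring (a SINGLE-mode
 * chain) and a DMA-coherent array of SPQ entries; ecore_spq_setup() then
 * initializes the lists, points each entry at its ramrod data buffer,
 * acquires the PROTOCOLID_CORE CID and programs the connection context via
 * ecore_spq_hw_initialize().
 */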
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
                p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_spq'");
                return ECORE_NOMEM;
        }

        /* SPQ ring  */
        if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
                        ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
                        /* N/A when the mode is SINGLE */
                        sizeof(struct slow_path_element), &p_spq->chain)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
}

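/* Hand out an SPQ entry from the pre-allocated free pool. If the pool is
 * exhausted, a temporary entry is allocated instead and marked for the
 * unlimited_pending queue; ecore_spq_add_entry() later copies it into a real
 * pool entry and frees the temporary one.
 */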
enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
                                    sizeof(struct ecore_spq_entry));
                if (!p_ent) {
                        OSAL_SPIN_UNLOCK(&p_spq->lock);
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate an SPQ entry"
                                  " for a pending ramrod\n");
                        return ECORE_NOMEM;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return ECORE_SUCCESS;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;
                }

                struct ecore_spq_entry *p_en2;

                p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry,
                                              list);
                OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                /* Copy the ring element physical pointer to the new
                 * entry, since we are about to override the entire ring
                 * entry and don't want to lose the pointer.
                 */
                p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                /* Setting the cookie to the comp_done of the
                 * new element.
                 */
                if (p_ent->comp_cb.cookie == &p_ent->comp_done)
                        p_ent->comp_cb.cookie = &p_en2->comp_done;

                *p_en2 = *p_ent;

                OSAL_FREE(p_hwfn->p_dev, p_ent);

                p_ent = p_en2;
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

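/* Posting flow: ecore_spq_post() fills the entry and queues it via
 * ecore_spq_add_entry(); ecore_spq_pend_post() then moves unlimited_pending
 * entries into the pending list as pool entries free up, and
 * ecore_spq_post_list() pushes pending entries to the HW ring while keeping
 * SPQ_HIGH_PRI_RESERVE_DEFAULT ring elements in reserve for high-priority
 * ramrods. A typical caller sequence (sketch only; the actual ramrod setup
 * is done by callers elsewhere in the driver) is:
 *
 *      ecore_spq_get_entry(p_hwfn, &p_ent);
 *      ... fill p_ent->elem.hdr and the ramrod data ...
 *      ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
 */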
static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t *head,
                                                u32 keep_reserve)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high priority ramrod (even if one is already
         * pending FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);
                        __ecore_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry, list);
                if (!p_ent)
                        return ECORE_INVAL;

                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        rc = ecore_spq_post_list(p_hwfn,
                                 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
        if (rc)
                return rc;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq_entry *p_ent,
                                    u8 *fw_return_code)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
                           " [cmd %02x protocol %02x]",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flows be completed successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

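/* Match an EQ completion (by its echo value) against the entries on the
 * completion_pending list. Completions may arrive out of order; the
 * comp_bitmap ensures the SPQ ring producer is only returned for the longest
 * prefix of consecutively completed entries. For example, if echo 11 and 12
 * complete before 10, no ring element is recycled until 10 completes, at
 * which point all three are returned at once.
 */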
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq *p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_spq_entry *tmp;
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!p_hwfn)
                return ECORE_INVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return ECORE_INVAL;

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list, struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match"
                           " echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this"
                          " EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p)\n",
                   OSAL_LE16_TO_CPU(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
                /* EBLOCK is responsible for freeing its own entry */
                ecore_spq_return_entry(p_hwfn, found);
        }

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

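/* The ConsQ chain is only allocated and reset here; its base address is
 * handed to firmware via the consolid_base_addr fields that
 * ecore_spq_hw_initialize() programs into the connection context.
 */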
struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_consq'\n");
                return OSAL_NULL;
        }

        /* Allocate and initialize ConsQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
                              0x80, &p_consq->chain)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        ecore_consq_free(p_hwfn, p_consq);
        return OSAL_NULL;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
        ecore_chain_reset(&p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
        if (!p_consq)
                return;
        ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_consq);
}