drivers/net/qede/base/ecore_spq.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)
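
/* Worst-case blocking budgets implied by the constants above (illustrative
 * arithmetic only):
 *   quick poll:    SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10us
 *                  = ~100us
 *   sleeping poll: SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = 1000 * 5ms
 *                  = ~5s
 */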

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
                                  union event_ring_data OSAL_UNUSED * data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent,
                                              u8 *p_fw_ret,
                                              bool sleep_between_iter)
{
        struct ecore_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;

        while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }

                if (sleep_between_iter)
                        OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
                else
                        OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
        }

        return ECORE_TIMEOUT;
}

static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret, bool skip_quick_poll)
{
        struct ecore_spq_comp_done *comp_done;
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (rc == ECORE_SUCCESS)
                        return ECORE_SUCCESS;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_AGAIN;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_ptt);
        ecore_ptt_release(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }
err:
        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}
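
/* Illustrative sketch (hypothetical caller, not taken from this file) of how
 * an EBLOCK-mode ramrod reaches the blocking path above; error handling and
 * ramrod-specific setup are omitted:
 *
 *      struct ecore_spq_entry *p_ent = OSAL_NULL;
 *      u8 fw_ret;
 *
 *      ecore_spq_get_entry(p_hwfn, &p_ent);
 *      p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *      // fill p_ent->elem.hdr and the ramrod data here
 *      ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 *
 * With ECORE_SPQ_MODE_EBLOCK, ecore_spq_post() invokes ecore_spq_block(),
 * which waits for ecore_spq_blocking_cb() to flag completion.
 */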

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
                   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
{
        struct ecore_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        enum _ecore_status_t rc;
        u16 physical_q;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        /* @@@TBD we zero the context until we have ilt_reset implemented. */
        OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

        if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
                SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
                SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
                /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                 *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
                 */
                SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                          E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
        }

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq *p_spq,
                                              struct ecore_spq_entry *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        struct core_db_data *p_db_data = &p_spq->db_data;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;

        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem;    /* Struct assignment */

        p_db_data->spq_prod =
                OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* Make sure the SPQE is updated before the doorbell */
        OSAL_WMB(p_hwfn->p_dev);

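        /* Note: the cast below treats struct core_db_data as a raw 32-bit
         * doorbell value, i.e. it relies on the structure being exactly
         * four bytes wide.
         */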
        DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

        /* Make sure the doorbell was rung */
        OSAL_WMB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
                   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
                   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return ecore_sriov_eqe_event(p_hwfn,
                                             p_eqe->opcode,
                                             p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                         void *cookie)
{
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee that the fw_cons index we use points to a usable
         * element of the chain, so that the chain macros behave correctly.
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }
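        /* Illustrative example with hypothetical chain geometry: if a page
         * held 7 usable elements plus 1 page-link element, a fw_cons_idx
         * whose low bits equal 7 would sit on the page boundary and be
         * advanced by 1 so it points at the first element of the next page.
         */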

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        rc = ECORE_INVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,       /* Event Opcode */
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
                           /* Echo value from ramrod data on the host */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,    /* FW return code for SP
                                                      * ramrods
                                                      */
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (ecore_async_event_completion(p_hwfn, p_eqe))
                                rc = ECORE_INVAL;
                } else if (ecore_spq_completion(p_hwfn,
                                                p_eqe->echo,
                                                p_eqe->fw_return_code,
                                                &p_eqe->data)) {
                        rc = ECORE_INVAL;
                }

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_eq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element),
                              &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn, ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return ECORE_SUCCESS;

eq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_eq);
        return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
        p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe
                                                 *cqe,
                                                 enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->p_dev))
                return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct core_db_data *p_db_data;
        void OSAL_IOMEM *db_addr;
        dma_addr_t p_phys = 0;
        u32 i, capacity;
        enum _ecore_status_t rc;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);

        /* Initialize the address/data of the SPQ doorbell */
        p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
        p_db_data = &p_spq->db_data;
        OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
        SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* Register the SPQ doorbell with the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
                                   DB_REC_WIDTH_32B, DB_REC_KERNEL);
        if (rc != ECORE_SUCCESS)
                DP_INFO(p_hwfn,
                        "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
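
/* Note: ecore_spq_hw_initialize() (called above) dereferences
 * p_hwfn->p_consq, so the ConsQ is expected to be allocated before
 * ecore_spq_setup() runs.
 */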

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }

        /* SPQ ring */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_SINGLE,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              0, /* N/A when the mode is SINGLE */
                              sizeof(struct slow_path_element),
                              &p_spq->chain, OSAL_NULL)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
#endif

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        void OSAL_IOMEM *db_addr;
        u32 capacity;

        if (!p_spq)
                return;

        /* Delete the SPQ doorbell from the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

        OSAL_FREE(p_hwfn->p_dev, p_spq);
}

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = ECORE_NOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        OSAL_SPIN_UNLOCK(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be called while the SPQ lock is held.
 *
 * Adds the entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;

                } else {
                        struct ecore_spq_entry *p_en2;

                        p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                                      struct ecore_spq_entry,
                                                      list);
                        OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
                                OSAL_FREE(p_hwfn->p_dev, p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t *head,
                                                u32 keep_reserve)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high priority ramrod (even if one is already
         * pending FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->completion_pending);
                        p_spq->comp_sent_count++;

                        rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                        if (rc) {
                                OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                                       &p_spq->completion_pending);
                                __ecore_spq_return_entry(p_hwfn, p_ent);
                                return rc;
                        }
                }
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry, list);
                if (!p_ent)
                        return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return ecore_spq_post_list(p_hwfn, &p_spq->pending,
                                   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
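
/* With SPQ_HIGH_PRI_RESERVE_DEFAULT == 1, ecore_spq_post_list() always leaves
 * one ring element unused, so a subsequent high-priority ramrod (pushed to the
 * head of the pending list by ecore_spq_add_entry()) can still be posted even
 * when the ring is otherwise full.
 */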

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq_entry *p_ent,
                                    u8 *fw_return_code)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
                           " [cmd %02x protocol %02x]\n",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to allow the flow to complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
                                     p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        OSAL_FREE(p_hwfn->p_dev, p_ent);

                        /* TBD: handle error flow and remove p_ent from
                         * completion pending
                         */
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq *p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_spq_entry *tmp;
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!p_hwfn)
                return ECORE_INVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return ECORE_INVAL;

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list, struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overwriting SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
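                        /* Illustrative example: if echoes 5 and 6 complete
                         * before 4, their bits are only marked; once echo 4
                         * arrives, the loop below clears bits 4, 5 and 6 and
                         * returns three produced elements to the chain.
                         */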
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match"
                           " echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   OSAL_LE16_TO_CPU(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                ecore_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_consq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize ConsQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80, /* num of elements */
                              0x80, /* element size, in bytes */
                              &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        p_hwfn->p_consq = p_consq;
        return ECORE_SUCCESS;

consq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_consq);
        return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}