net/qede/base: multi-Txq support on same queue-zone for VFs
drivers/net/qede/base/ecore_spq.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)
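
/* Resulting wait budget for a blocking ramrod: the quick-poll phase
 * busy-waits for up to 10 * 10us = 100us, and the sleeping phase polls
 * for up to 1000 * 5ms = ~5 seconds before escalating to an MCP drain.
 */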

/***************************************************************************
 * Blocking Implementation (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
                                  void *cookie,
                                  union event_ring_data *data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
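        /* Pairs with the OSAL_SMP_RMB() done by the waiter in
         * __ecore_spq_block().
         */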
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent,
                                              u8 *p_fw_ret,
                                              bool sleep_between_iter)
{
        struct ecore_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;

        while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }

                if (sleep_between_iter)
                        OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
                else
                        OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
        }

        return ECORE_TIMEOUT;
}

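/* Escalation ladder for a blocking ramrod: a short busy-wait poll first
 * (unless skip_quick_poll is set), then polling with sleeps between
 * iterations, then an MCP drain request followed by one more polling
 * round, and finally a RAMROD_FAIL notification if the completion never
 * arrives.
 */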
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret, bool skip_quick_poll)
{
        struct ecore_spq_comp_done *comp_done;
        enum _ecore_status_t rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (rc == ECORE_SUCCESS)
                        return ECORE_SUCCESS;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }
err:
        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
                   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
{
        struct ecore_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        enum _ecore_status_t rc;
        u16 physical_q;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
                SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
                SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
                /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                 *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
                 */
                SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                          E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
        }

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq *p_spq,
                                              struct ecore_spq_entry *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem;    /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        OSAL_MEMSET(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        OSAL_WMB(p_hwfn->p_dev);

        DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
                 *(u32 *)&db);

        /* make sure the doorbell was rung */
        OSAL_WMB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
                   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
                   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return ecore_sriov_eqe_event(p_hwfn,
                                             p_eqe->opcode,
                                             p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                         void *cookie)
{
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        enum _ecore_status_t rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so the chain macros stay
         * consistent.
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        rc = ECORE_INVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,       /* Event Opcode */
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
                           /* Echo value from ramrod data on the host */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,    /* FW return code for SP
                                                      * ramrods
                                                      */
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (ecore_async_event_completion(p_hwfn, p_eqe))
                                rc = ECORE_INVAL;
                } else if (ecore_spq_completion(p_hwfn,
                                                p_eqe->echo,
                                                p_eqe->fw_return_code,
                                                &p_eqe->data)) {
                        rc = ECORE_INVAL;
                }

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_eq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element),
                              &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn, ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return ECORE_SUCCESS;

eq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_eq);
        return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
        p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe
                                                 *cqe,
                                                 enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->p_dev))
                return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

        /* SPQ empty pool */
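        /* Each entry's ring element gets a data pointer to that entry's own
         * 'ramrod' member inside the coherent p_virt array, hence the
         * OFFSETOF() below.
         */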
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }

        /* SPQ ring */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_SINGLE,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              0, /* N/A when the mode is SINGLE */
                              sizeof(struct slow_path_element),
                              &p_spq->chain, OSAL_NULL)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
}

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
                        DP_NOTICE(p_hwfn, true,
                                 "Failed to allocate an SPQ entry for a pending"
                                 " ramrod\n");
                        rc = ECORE_NOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        OSAL_SPIN_UNLOCK(&p_spq->lock);
        return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be called while holding the SPQ lock.
 *
 * Adds the entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;

                } else {
                        struct ecore_spq_entry *p_en2;

                        p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                                     struct ecore_spq_entry,
                                                     list);
                        OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated
                         * p_ent.
                         */
                        if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
                                OSAL_FREE(p_hwfn->p_dev, p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t *head,
                                                u32 keep_reserve)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high priority ramrod (even if one is already
         * pending FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->completion_pending);
                        p_spq->comp_sent_count++;

                        rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                        if (rc) {
                                OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                                    &p_spq->completion_pending);
                                __ecore_spq_return_entry(p_hwfn, p_ent);
                                return rc;
                        }
                }
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry, list);
                if (!p_ent)
                        return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return ecore_spq_post_list(p_hwfn,
                                 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq_entry *p_ent,
                                    u8 *fw_return_code)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
                           " [cmd %02x protocol %02x]\n",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flows complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; no need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
                                     p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        OSAL_FREE(p_hwfn->p_dev, p_ent);

                        /* TBD: handle error flow and remove p_ent from
                         * completion pending
                         */
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq *p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_spq_entry *tmp;
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!p_hwfn)
                return ECORE_INVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return ECORE_INVAL;

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list, struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
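                        /* Illustrative example: with comp_bitmap_idx == 1, a
                         * completion for echo 2 only sets bit 2 and returns
                         * nothing to the chain; once echo 1 completes, the
                         * loop below clears bits 1 and 2, advances
                         * comp_bitmap_idx to 3 and returns two produced
                         * elements in one go.
                         */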
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match"
                           " echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this"
                          " EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   OSAL_LE16_TO_CPU(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                ecore_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_consq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize ConsQ chain */
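        /* Each ConsQ element is 0x80 bytes, so a single chain page holds
         * ECORE_CHAIN_PAGE_SIZE / 0x80 of them.
         */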
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
                              0x80,
                              &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        p_hwfn->p_consq = p_consq;
        return ECORE_SUCCESS;

consq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_consq);
        return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}