/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
				  void *cookie,
				  union event_ring_data *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}

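/* Wait for a BLOCK/EBLOCK-mode ramrod to complete. Polls the 'done' flag
 * set by ecore_spq_blocking_cb(); if the ramrod appears stuck, requests an
 * MCP drain and polls once more before notifying the HW-error path.
 */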
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct ecore_spq_comp_done *comp_done;
	enum _ecore_status_t rc;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		/* validate we receive completion update */
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}
		OSAL_MSLEEP(5);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}
		OSAL_MSLEEP(5);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}

	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
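/* Resolve the completion callback for an entry before it is posted; BLOCK
 * and EBLOCK modes share the internal blocking callback, while CB mode
 * keeps the caller-supplied callback untouched.
 */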
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
		   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
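/* Program the XSTORM connection context for the SPQ connection: enable the
 * relevant aggregation flags, select the LB TC physical queue and write the
 * SPQ and ConsQ ring base addresses.
 */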
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq *p_spq)
{
	struct ecore_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union ecore_qm_pq_params pq_params;
	enum _ecore_status_t rc;
	u16 pq;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
	 *	     XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
	 */
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
	    DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
	    DMA_HI_LE(p_spq->chain.p_phys_addr);

	p_cxt->xstorm_st_context.consolid_base_addr.lo =
	    DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.consolid_base_addr.hi =
	    DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}

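/* Copy a ramrod element into the SPQ ring, stamp it with the current
 * producer index as 'echo', and ring the XCM doorbell to hand it to FW.
 */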
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem;	/* struct assignment */

	/* send a doorbell on the slow hwfn session */
	OSAL_MEMSET(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* validate producer is up to date */
	OSAL_RMB(p_hwfn->p_dev);

	db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* do not reorder */
	OSAL_BARRIER(p_hwfn->p_dev);

	DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell is rung */
	OSAL_MMIOWB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
		   " agg_params: %02x, prod: %04x\n",
		   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
		   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return ECORE_SUCCESS;
	default:
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
	    USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}

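/* Slowpath EQ handler: walk the event ring from the local consumer up to a
 * snapshot of the FW consumer, dispatching each entry either to the async
 * event handler or to the SPQ completion flow, then update the EQ producer.
 */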
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
					 void *cookie)
{
	struct ecore_eq *p_eq = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);

		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x "
			   "fwret %x flags %x\n", p_eqe->opcode,
			   p_eqe->protocol_id,	/* Event Protocol ID */
			   p_eqe->reserved0,	/* Reserved */
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code, /* FW return code for SP
						   * ramrods
						   */
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	return rc;
}

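/* Allocate the EQ container and its PBL-backed ring of num_elem entries,
 * and register the handler on the slowpath status block.
 */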
struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_eq'\n");
		return OSAL_NULL;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element), &p_eq->chain)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn,
			      ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	ecore_eq_free(p_hwfn, p_eq);
	return OSAL_NULL;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
	ecore_chain_reset(&p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
	if (!p_eq)
		return;
	ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_eq);
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe
						 *cqe,
						 enum protocol_type protocol)
{
	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);
	}

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
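/* (Re)initialize an already-allocated SPQ: reset the bookkeeping lists and
 * counters, link each entry's ramrod buffer physical address into its ring
 * element, acquire the CORE CID and program the HW context.
 */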
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
		p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);
}

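/* Allocate the SPQ container, its SINGLE-mode ring and one coherent buffer
 * holding all SPQ entries (including their ramrod data).
 */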
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_spq'");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
			ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
			/* N/A when the mode is SINGLE */
			sizeof(struct slow_path_element), &p_spq->chain)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt, p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
}

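/* Hand out an SPQ entry: reuse one from the free pool when available,
 * otherwise allocate an atomic one destined for the unlimited_pending list.
 */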
enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				    sizeof(struct ecore_spq_entry));
		if (!p_ent) {
			OSAL_SPIN_UNLOCK(&p_spq->lock);
			DP_NOTICE(p_hwfn, true,
				  "Failed to allocate an SPQ entry"
				  " for a pending ramrod\n");
			return ECORE_NOMEM;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry, list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return ECORE_SUCCESS;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
		    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;
		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			/* Setting the cookie to the comp_done of the
			 * new element.
			 */
			if (p_ent->comp_cb.cookie == &p_ent->comp_done)
				p_ent->comp_cb.cookie = &p_en2->comp_done;

			*p_en2 = *p_ent;

			OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
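/* Drain a pending list into the HW ring while at least keep_reserve ring
 * elements remain free; entries are moved to completion_pending before the
 * doorbell rings so the EQ handler can always find them.
 */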
static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);
			__ecore_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry, list);
		if (!p_ent)
			return ECORE_INVAL;

		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	rc = ecore_spq_post_list(p_hwfn,
				 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
	if (rc)
		return rc;

	return ECORE_SUCCESS;
}

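/* Post a single ramrod. For EBLOCK entries this also waits for the
 * completion and returns the entry to the free pool.
 *
 * Illustrative caller flow (a sketch only - actual callers typically go
 * through the ecore_sp_* command helpers rather than calling this
 * directly):
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc == ECORE_SUCCESS) {
 *		(fill p_ent->elem.hdr and the ramrod data, set comp_mode)
 *		rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 *	}
 */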
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post"
			   " [cmd %02x protocol %02x]",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows to be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

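/* EQ-side completion: match the EQE echo against completion_pending, use
 * the completion bitmap to retire ring elements in order even when EQEs
 * arrive out of order, invoke the entry's callback and re-arm the posting
 * machinery.
 */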
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!p_hwfn)
		return ECORE_INVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list, struct ecore_spq_entry) {
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match"
			   " echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this"
			  " EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
		/* EBLOCK is responsible for freeing its own entry */
		ecore_spq_return_entry(p_hwfn, found);
	}

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_consq'\n");
		return OSAL_NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE / 0x80,
			      0x80, &p_consq->chain)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	ecore_consq_free(p_hwfn, p_consq);
	return OSAL_NULL;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
	ecore_chain_reset(&p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
	if (!p_consq)
		return;
	ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_consq);
}