/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)
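
/* Keep one ring element free for a high-priority ramrod (see
 * ecore_spq_post_list()); poll up to SPQ_BLOCK_SLEEP_LENGTH iterations,
 * 5ms apiece, in ecore_spq_block() before declaring a ramrod stuck.
 */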
/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
				  void *cookie,
				  union event_ring_data *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}
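
/* Poll for the 'done' flag that ecore_spq_blocking_cb() sets. If the first
 * poll loop expires, request an MCP drain of pending ramrods and poll once
 * more before notifying a ramrod failure.
 */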
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct ecore_spq_comp_done *comp_done;
	enum _ecore_status_t rc;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		/* validate we receive completion update */
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}
		OSAL_MSLEEP(5);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}
		OSAL_MSLEEP(5);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}

	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}
/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
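/* Completion modes: EBLOCK and BLOCK complete synchronously through
 * ecore_spq_blocking_cb(); MODE_CB uses whatever callback the caller
 * placed in p_ent->comp_cb before posting.
 */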
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
		   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}
/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq *p_spq)
{
	u16 pq;
	struct ecore_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union ecore_qm_pq_params pq_params;
	enum _ecore_status_t rc;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
	 *	     XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
	 */
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
	    DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
	    DMA_HI_LE(p_spq->chain.p_phys_addr);

	p_cxt->xstorm_st_context.consolid_base_addr.lo =
	    DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.consolid_base_addr.hi =
	    DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}
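
/* Write one ramrod element to the SPQ ring and ring the XCM doorbell.
 * The memory barriers below order the ring write, the producer-index
 * read and the doorbell write against each other.
 */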
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem;	/* struct assignment */

	/* send a doorbell on the slow hwfn session */
	OSAL_MEMSET(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* validate producer is up to date */
	OSAL_RMB(p_hwfn->p_dev);

	db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* do not reorder */
	OSAL_BARRIER(p_hwfn->p_dev);

	DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell was rung */
	OSAL_MMIOWB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
		   " agg_params: %02x, prod: %04x\n",
		   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
		   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}
/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return ecore_sriov_eqe_event(p_hwfn,
					     p_eqe->opcode,
					     p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
	    USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}
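
/* EQ handler: snapshot the firmware consumer index, dispatch every new
 * EQE - async events to protocol handlers, the rest as slow-path ramrod
 * completions - then acknowledge via ecore_eq_prod_update().
 */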
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
					 void *cookie)
{
	struct ecore_eq *p_eq = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x "
			   "fwret %x flags %x\n", p_eqe->opcode,
			   p_eqe->protocol_id,	/* Event Protocol ID */
			   p_eqe->reserved0,	/* Reserved */
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,	/* FW return code for SP
							 * ramrods
							 */
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	return rc;
}
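
/* The EQ chain below is PBL-backed; registering on the slow-path status
 * block yields p_fw_cons, through which firmware publishes its consumer.
 */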
struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_eq'\n");
		return OSAL_NULL;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element), &p_eq->chain)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn,
			      ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	ecore_eq_free(p_hwfn, p_eq);
	return OSAL_NULL;
}
void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
	ecore_chain_reset(&p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
	if (!p_eq)
		return;
	ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_eq);
}
/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe
						 *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}
/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
		p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_phys += sizeof(struct ecore_spq_entry);
		p_virt++;
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);
}
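
/* The SPQ owns two DMA allocations: a single-page chain of
 * slow_path_element and a coherent array of ecore_spq_entry, whose
 * embedded ramrod data each elem.data_ptr is wired to in
 * ecore_spq_setup() above.
 */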
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_spq'");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
			ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
			/* N/A when the mode is SINGLE */
			sizeof(struct slow_path_element), &p_spq->chain)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}
void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt,
				       p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
}
enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				    sizeof(struct ecore_spq_entry));
		if (!p_ent) {
			OSAL_SPIN_UNLOCK(&p_spq->lock);
			DP_NOTICE(p_hwfn, true,
				  "Failed to allocate an SPQ entry"
				  " for a pending ramrod\n");
			return ECORE_NOMEM;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry, list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return ECORE_SUCCESS;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}
/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
		    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;
		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			/* Setting the cookie to the comp_done of the
			 * new element.
			 */
			if (p_ent->comp_cb.cookie == &p_ent->comp_done)
				p_ent->comp_cb.cookie = &p_en2->comp_done;

			*p_en2 = *p_ent;

			OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
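/* Posting pipeline: entries wait in 'pending' (or 'unlimited_pending'
 * once the free pool is exhausted), move to 'completion_pending' when
 * written to the ring, and return to 'free_pool' when the matching EQE
 * arrives.
 */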
static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);
			__ecore_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry, list);
		if (!p_ent)
			return ECORE_INVAL;

		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	rc = ecore_spq_post_list(p_hwfn,
				 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
	if (rc)
		return rc;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post"
			   " [cmd %02x protocol %02x]",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);
	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}
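
/* Typical caller flow - an illustrative sketch only, the ramrod-specific
 * setup normally lives in the ecore_sp_* command helpers:
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret = 0;
 *
 *	if (ecore_spq_get_entry(p_hwfn, &p_ent) != ECORE_SUCCESS)
 *		return ECORE_NOMEM;
 *	p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *	(fill p_ent->elem.hdr and the ramrod data here)
 *	rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 */

/* Match an EQE back to its pending entry by echo. Completions may arrive
 * out of order, so each is marked in p_comp_bitmap and the chain consumer
 * advances only over the longest run of consecutively completed echoes.
 */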
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!p_hwfn)
		return ECORE_INVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list, struct ecore_spq_entry) {
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match"
			   " echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this"
			  " EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
		/* EBLOCK is responsible for freeing its own entry */
		ecore_spq_return_entry(p_hwfn, found);
	}

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}
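
/* ConsQ - the consolidation queue ring is only allocated and reset here;
 * its base address is handed to firmware via consolid_base_addr in
 * ecore_spq_hw_initialize().
 */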
struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_consq'\n");
		return OSAL_NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE / 0x80,
			      0x80, &p_consq->chain)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	ecore_consq_free(p_hwfn, p_consq);
	return OSAL_NULL;
}
void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
	ecore_chain_reset(&p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
	if (!p_consq)
		return;
	ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_consq);
}