/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)
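
/* Polling budget implied by the constants above: a quick-poll phase of
 * 10 iterations x 10us, followed (if needed) by a sleeping phase of up
 * to 1000 iterations x 5ms, i.e. roughly 5 seconds per attempt.
 */
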
/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
				  void *cookie,
				  union event_ring_data *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}
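
/* Poll the completion flag set by ecore_spq_blocking_cb(); the OSAL_SMP_RMB
 * below pairs with the callback's OSAL_SMP_WMB so the fw_return_code read is
 * ordered after the 'done' flag read.
 */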
static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter)
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		else
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
	}

	return ECORE_TIMEOUT;
}
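
/* Escalation ladder for a blocking ramrod: a quick busy-wait poll, then a
 * sleeping poll, then an MCP drain request followed by one more sleeping
 * poll before declaring the ramrod stuck.
 */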
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}
err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
		   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
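/* Initialize the per-connection context for the SPQ CID: zero it, enable the
 * relevant XSTORM aggregation flags, program the QM physical queue and the
 * SPQ/ConsQ ring base addresses the FW will use.
 */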
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq *p_spq)
{
	struct ecore_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	enum _ecore_status_t rc;
	u16 physical_q;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	/* @@@TBD we zero the context until we have ilt_reset implemented. */
	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		 *	     E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
		 */
		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
	}

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
	    DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
	    DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
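
/* Write one SPQ element into the ring and ring the XCM doorbell. The write
 * memory barriers ensure the element is globally visible before the doorbell
 * and that the doorbell itself is not deferred behind later writes.
 */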
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem;	/* struct assignment */

	/* send a doorbell on the slow hwfn session */
	OSAL_MEMSET(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

	DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
		 *(u32 *)&db);

	/* make sure doorbell is rung */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
		   " agg_params: %02x, prod: %04x\n",
		   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
		   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
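
/* Dispatch an asynchronous EQ event by protocol; currently only
 * PROTOCOLID_COMMON (SR-IOV events) is expected here.
 */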
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return ecore_sriov_eqe_event(p_hwfn, p_eqe->opcode,
					     p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
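/* Publish the driver's EQ index to the USTORM EQE consumer location in
 * internal RAM, reached through the BAR0 GTT window; OSAL_MMIOWB keeps
 * successive updates ordered.
 */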
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
	    USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}
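
/* EQ processing loop: consume event-ring entries up to the FW consumer
 * snapshot, route each either to the async handler or to SPQ completion,
 * then publish the new index back to FW.
 */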
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
					 void *cookie)
{
	struct ecore_eq *p_eq = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,	    /* Event Opcode */
			   p_eqe->protocol_id,	    /* Event Protocol ID */
			   p_eqe->reserved0,	    /* Reserved */
			   /* Echo value from ramrod data on the host */
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,   /* FW return code for SP
						     * ramrods
						     */
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element),
			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;

eq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_eq);
	return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe
						 *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);
	}

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
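/* (Re)initialize a previously allocated SPQ: build the free pool of SPQ
 * entries over the coherent buffer, point each entry's data_ptr at its own
 * ramrod area, reset statistics, acquire the SPQ CID and program the HW
 * context.
 */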
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);
}
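
/* Allocate the SPQ: the queue struct, a SINGLE-mode chain for the ring
 * itself and one coherent DMA buffer holding an ecore_spq_entry (ramrod
 * data included) per ring element.
 */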
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt, p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
}

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to allocate an SPQ entry for a pending"
				  " ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry, list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
		    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;
		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
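
/* Post as many entries from 'head' as the ring can take while keeping
 * 'keep_reserve' elements spare; each posted entry moves to the
 * completion_pending list until its EQE arrives.
 */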
static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						&p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry, list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn,
				   &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
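
/* Main entry point for posting a ramrod. A minimal usage sketch, assuming
 * the caller has already obtained and filled an entry (field values below
 * are illustrative, not mandated by this API):
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
 *	... fill p_ent->elem.hdr and the ramrod data ...
 *	rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 */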
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post"
			   " [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}
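
/* Match an EQE against the completion_pending list by echo, fold successive
 * completions into the chain via the out-of-order bitmap, invoke the entry's
 * callback and then try to post more pending ramrods.
 */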
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!p_hwfn)
		return ECORE_INVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list, struct ecore_spq_entry) {
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match"
			   " echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this"
			  " EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}
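
/* The ConsQ (consolidation queue) ring is consumed by FW; its base address
 * is programmed into the SPQ context in ecore_spq_hw_initialize(), so the
 * driver only allocates, resets and frees it.
 */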
enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_consq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE / 0x80,
			      0x80,
			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
		goto consq_allocate_fail;
	}

	p_hwfn->p_consq = p_consq;
	return ECORE_SUCCESS;

consq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_consq);
	return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}