/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)
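
/* With the values above, the quick poll in __ecore_spq_block() busy-waits
 * for at most ~100us (10 iterations x 10us), while the sleeping variant
 * waits for up to ~5s (1000 iterations x 5ms) before timing out.
 */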

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
				  union event_ring_data OSAL_UNUSED * data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* Make the update visible to the waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter)
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		else
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
	}

	return ECORE_TIMEOUT;
}
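
/* Block until the ramrod completes, escalating gradually: first a short
 * busy-wait poll, then polling with sleeps between iterations, then an MCP
 * drain request followed by one more sleeping poll. Only when all of these
 * fail is the ramrod declared stuck and ECORE_HW_ERR_RAMROD_FAIL raised.
 */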
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}

err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
		   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq *p_spq)
{
	struct ecore_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	enum _ecore_status_t rc;
	u16 physical_q;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	/* @@@TBD we zero the context until we have ilt_reset implemented. */
	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		 *	    E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
		 */
		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
	}

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
	    DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
	    DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
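
/* Write a single SPQ element into the chain and ring the XCM doorbell.
 * The echo value is sampled from the chain producer index before producing;
 * it is what later ties the EQ completion back to this specific entry.
 */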
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem;	/* Struct assignment */

	p_db_data->spq_prod =
		OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* Make sure doorbell is rung */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
		   " agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
		   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return ecore_sriov_eqe_event(p_hwfn,
					     p_eqe->opcode,
					     p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
	    USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}
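
/* EQ completion handler, registered on the slowpath status block. It
 * snapshots the FW consumer index, aligns it past unusable chain elements,
 * then consumes and dispatches every new event: asynchronous events go to
 * ecore_async_event_completion(), all others complete pending ramrods via
 * ecore_spq_completion().
 */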
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
					 void *cookie)
{
	struct ecore_eq *p_eq = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,	/* Event Opcode */
			   p_eqe->protocol_id,	/* Event Protocol ID */
			   p_eqe->reserved0,	/* Reserved */
			   /* Echo value from ramrod data on the host */
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,	/* FW return code for SP
							 * ramrods
							 */
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element),
			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;

eq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_eq);
	return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe
						 *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);
	}

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
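
/* SPQ entries cycle through four lists: they start in free_pool, move to
 * pending (or to unlimited_pending when the pool is exhausted) once a
 * client posts them, move to completion_pending when written to the chain
 * and doorbelled, and return to free_pool upon EQ completion.
 */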
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct core_db_data *p_db_data;
	void OSAL_IOMEM *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	enum _ecore_status_t rc;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc != ECORE_SUCCESS)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
#endif

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	void OSAL_IOMEM *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt,
				       p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

	OSAL_FREE(p_hwfn->p_dev, p_spq);
}
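
/* Hand out an SPQ entry. When the free pool is exhausted the entry is
 * allocated atomically and destined for the unlimited_pending list; such
 * entries are freed rather than pooled once their ramrod completes.
 */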
enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to allocate an SPQ entry for a pending"
				  " ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry, list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
		    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;
		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						&p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry, list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn,
				   &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
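
/* Post a ramrod: fill the entry, queue it according to its priority, and
 * flush as much of the pending queue as the chain has room for. For
 * EBLOCK entries, the caller is then blocked via ecore_spq_block() until
 * the firmware completes the ramrod and its return code is available.
 */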
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post"
			   " [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flow be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}
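
/* Completion handler invoked from EQ processing. Completions may arrive
 * out of order: e.g. if echo 3 completes before echo 2, bit 3 is only
 * marked in the completion bitmap, and the chain consumer advances once
 * echo 2 arrives and the run of successively completed bits can be
 * returned in order.
 */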
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!p_hwfn)
		return ECORE_INVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list, struct ecore_spq_entry) {
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match"
			   " echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this"
			  " EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_consq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE / 0x80,
			      0x80,
			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	p_hwfn->p_consq = p_consq;
	return ECORE_SUCCESS;

consq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_consq);
	return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}