/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(200)
#define SPQ_BLOCK_SLEEP_MS		(5)

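/* With the defaults above, the quick (busy-wait) poll budget is
 * 10 * 10us = 100us, and the sleeping poll budget is 200 * 5ms = 1000ms,
 * unless overridden via ecore_set_spq_block_timeout().
 */
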
/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
				  union event_ring_data OSAL_UNUSED * data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}

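/* Poll until ecore_spq_blocking_cb() sets 'done' on the completion cookie.
 * Each iteration either busy-waits for SPQ_BLOCK_DELAY_US or sleeps for
 * SPQ_BLOCK_SLEEP_MS, depending on sleep_between_iter.
 */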
static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
				      : SPQ_BLOCK_DELAY_MAX_ITER;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
		iter_cnt *= 5;
#endif

	while (iter_cnt--) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter)
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		else
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
	}

	return ECORE_TIMEOUT;
}

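/* Wait for a ramrod completion in escalating stages: a quick busy-wait
 * poll (unless skip_quick_poll), a sleeping poll, an MCP drain request,
 * and one final sleeping poll before the ramrod is declared stuck.
 */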
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}
err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
				 u32 spq_timeout_ms)
{
	p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
		spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
		SPQ_BLOCK_SLEEP_MAX_ITER;
}

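/* For example (illustrative values), spq_timeout_ms = 1000 yields
 * 1000 / SPQ_BLOCK_SLEEP_MS = 200 sleeping iterations, while 0 restores
 * the SPQ_BLOCK_SLEEP_MAX_ITER default.
 */
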
/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
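/* Wire up the completion callback according to the entry's completion
 * mode: EBLOCK/BLOCK entries get ecore_spq_blocking_cb() so that
 * ecore_spq_block() can poll on the completion cookie, while CB entries
 * keep the caller-supplied callback.
 */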
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
		   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK",
			   "MODE_BLOCK", "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct ecore_cxt_info cxt_info;
	u16 physical_q;
	enum _ecore_status_t rc;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	/* @@@TBD we zero the context until we have ilt_reset implemented. */
	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		 *	     E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
		 */
		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
	}

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
	    DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
	    DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

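/* Copy the prepared ramrod onto the SPQ chain and ring the XCM doorbell
 * with the new producer. The entry's echo field is stamped with the
 * producer index, which is later used to match the EQE completion back
 * to this entry.
 */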
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem;	/* Struct assignment */

	p_db_data->spq_prod =
		OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* Make sure the doorbell was rung */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
		   " agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
		   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
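/* Async EQEs are dispatched through a per-protocol callback table on the
 * SPQ; callbacks are registered and removed via
 * ecore_spq_register_async_cb() / ecore_spq_unregister_async_cb() below.
 */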
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	ecore_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return ECORE_INVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
			    enum protocol_type protocol_id,
			    ecore_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return ECORE_INVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return ECORE_SUCCESS;
}

void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
			      enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
	    USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}

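/* EQ completion handler, registered on the slowpath status block in
 * ecore_eq_alloc(). It snapshots the FW consumer once, rounds it past
 * unusable page-boundary elements, and handles every EQE up to that point.
 */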
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
					 void *cookie)
{
	struct ecore_eq *p_eq = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,	/* Event Opcode */
			   p_eqe->protocol_id,	/* Event Protocol ID */
			   p_eqe->reserved0,	/* Reserved */
			   /* Echo value from ramrod data on the host */
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,	/* FW return code for SP
							 * ramrods
							 */
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element),
			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;

eq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_eq);
	return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe
						 *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);
	}

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
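/* The SPQ keeps its entries on four lists: free_pool (available entries),
 * pending (filled entries waiting for room on the chain),
 * unlimited_pending (overflow entries allocated when free_pool is empty),
 * and completion_pending (posted to FW, awaiting an EQE).
 */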
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct core_db_data *p_db_data;
	void OSAL_IOMEM *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	enum _ecore_status_t rc;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc != ECORE_SUCCESS)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
		goto spq_allocate_fail;
#endif

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	void OSAL_IOMEM *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt,
				       p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	p_hwfn->p_spq = OSAL_NULL;
}

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry, list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - Adds a new entry to the pending list.
 *        Should be called while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
		    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;
		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated
			 * p_ent.
			 */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						&p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry, list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn,
				   &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

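/* Illustrative usage sketch (not code from this file): a typical caller
 * acquires an entry, fills the ramrod header/data, and posts it. With
 * ECORE_SPQ_MODE_EBLOCK the post below returns only once the ramrod
 * completes (or is declared stuck), at which point fw_ret is valid:
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc == ECORE_SUCCESS) {
 *		p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *		... fill p_ent->elem.hdr and the ramrod data ...
 *		rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 *	}
 */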
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post"
			   " [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

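/* Match an EQE back to its entry on the completion_pending list by the
 * echo value. Out-of-order completions are recorded in the completion
 * bitmap, and the chain consumer is advanced only across the first run
 * of consecutively completed entries.
 */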
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!p_hwfn)
		return ECORE_INVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list, struct ecore_spq_entry) {
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match"
			   " echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this"
			  " EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   OSAL_LE16_TO_CPU(echo),
		   found->comp_cb.function, found->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_consq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE / 0x80,
			      0x80,
			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	p_hwfn->p_consq = p_consq;
	return ECORE_SUCCESS;

consq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_consq);
	return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
	p_hwfn->p_consq = OSAL_NULL;
}