1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
9 #include "ecore_gtt_reg_addr.h"
10 #include "ecore_hsi_common.h"
12 #include "ecore_sp_api.h"
13 #include "ecore_spq.h"
14 #include "ecore_iro.h"
15 #include "ecore_init_fw_funcs.h"
16 #include "ecore_cxt.h"
17 #include "ecore_int.h"
18 #include "ecore_dev_api.h"
19 #include "ecore_mcp.h"
21 #include "ecore_sriov.h"
23 /***************************************************************************
24 * Structures & Definitions
25 ***************************************************************************/
27 #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
29 #define SPQ_BLOCK_DELAY_MAX_ITER (10)
30 #define SPQ_BLOCK_DELAY_US (10)
31 #define SPQ_BLOCK_SLEEP_MAX_ITER (200)
32 #define SPQ_BLOCK_SLEEP_MS (5)
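/* Worst-case wait budgets implied by the values above: the quick-poll
 * phase busy-waits for up to 10 * 10us = ~100us, while the sleeping
 * phase defaults to 200 * 5ms = ~1s (the latter can be overridden via
 * ecore_set_spq_block_timeout()).
 */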
34 /***************************************************************************
35 * Blocking Imp. (BLOCK/EBLOCK mode)
36 ***************************************************************************/
37 static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
38 union event_ring_data OSAL_UNUSED * data,
41 struct ecore_spq_comp_done *comp_done;
43 comp_done = (struct ecore_spq_comp_done *)cookie;
45 comp_done->done = 0x1;
46 comp_done->fw_return_code = fw_return_code;
48 /* make update visible to waiting thread */
49 OSAL_SMP_WMB(p_hwfn->p_dev);
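/* Poll the completion flag set by ecore_spq_blocking_cb() above, either
 * busy-waiting or sleeping between iterations (per 'sleep_between_iter'),
 * for at most 'iter_cnt' iterations.
 */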
52 static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
53 struct ecore_spq_entry *p_ent,
55 bool sleep_between_iter)
57 struct ecore_spq_comp_done *comp_done;
60 comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
61 iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
62 : SPQ_BLOCK_DELAY_MAX_ITER;
64 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
69 OSAL_POLL_MODE_DPC(p_hwfn);
70 OSAL_SMP_RMB(p_hwfn->p_dev);
71 if (comp_done->done == 1) {
73 *p_fw_ret = comp_done->fw_return_code;
77 if (sleep_between_iter)
78 OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
80 OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
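/* Wait for a ramrod completion in BLOCK/EBLOCK mode. The escalation is:
 * a short busy-wait poll, then polling with sleeps in between, and if
 * the ramrod is still outstanding an MCP drain request followed by one
 * more polling round before the ramrod is declared stuck.
 */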
86 static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
87 struct ecore_spq_entry *p_ent,
88 u8 *p_fw_ret, bool skip_quick_poll)
90 struct ecore_spq_comp_done *comp_done;
91 struct ecore_ptt *p_ptt;
92 enum _ecore_status_t rc;
94 /* A relatively short polling period w/o sleeping, to allow the FW to
95 * complete the ramrod and thus possibly avoid the following sleeps.
97 if (!skip_quick_poll) {
98 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
99 if (rc == ECORE_SUCCESS)
100 return ECORE_SUCCESS;
103 /* Move to polling with a sleeping period between iterations */
104 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
105 if (rc == ECORE_SUCCESS)
106 return ECORE_SUCCESS;
108 p_ptt = ecore_ptt_acquire(p_hwfn);
112 DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
113 rc = ecore_mcp_drain(p_hwfn, p_ptt);
114 ecore_ptt_release(p_hwfn, p_ptt);
115 if (rc != ECORE_SUCCESS) {
116 DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
120 /* Retry after drain */
121 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
122 if (rc == ECORE_SUCCESS)
123 return ECORE_SUCCESS;
125 comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
126 if (comp_done->done == 1) {
128 *p_fw_ret = comp_done->fw_return_code;
129 return ECORE_SUCCESS;
132 DP_NOTICE(p_hwfn, true,
133 "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
134 OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
135 p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
136 OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
138 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
143 void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
146 p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
147 spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
148 SPQ_BLOCK_SLEEP_MAX_ITER;
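/* E.g. ecore_set_spq_block_timeout(p_hwfn, 1000) yields
 * 1000 / SPQ_BLOCK_SLEEP_MS = 200 sleep iterations of 5ms each, i.e.
 * roughly a one-second budget - the same as the SPQ_BLOCK_SLEEP_MAX_ITER
 * default used when 0 is passed.
 */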
151 /***************************************************************************
152 * SPQ entries inner API
153 ***************************************************************************/
154 static enum _ecore_status_t
155 ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
159 switch (p_ent->comp_mode) {
160 case ECORE_SPQ_MODE_EBLOCK:
161 case ECORE_SPQ_MODE_BLOCK:
162 p_ent->comp_cb.function = ecore_spq_blocking_cb;
164 case ECORE_SPQ_MODE_CB:
167 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
172 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
173 "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
174 " Data pointer: [%08x:%08x] Completion Mode: %s\n",
175 p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
176 p_ent->elem.hdr.protocol_id,
177 p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
178 D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
179 ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
182 return ECORE_SUCCESS;
185 /***************************************************************************
187 ***************************************************************************/
189 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
190 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
191 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
192 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
193 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
194 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
195 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
196 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
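/* Local mask/shift definitions for the XSTORM aggregative context
 * bit-fields that are set via SET_FIELD() in ecore_spq_hw_initialize()
 * below.
 */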
198 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
199 struct ecore_spq *p_spq)
201 __le32 *p_spq_base_lo, *p_spq_base_hi;
202 struct regpair *p_consolid_base_addr;
203 u8 *p_flags1, *p_flags9, *p_flags10;
204 struct core_conn_context *p_cxt;
205 struct ecore_cxt_info cxt_info;
206 u32 core_conn_context_size;
207 __le16 *p_physical_q0;
209 enum _ecore_status_t rc;
211 cxt_info.iid = p_spq->cid;
213 rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
215 if (rc != ECORE_SUCCESS) {
216 DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
221 p_cxt = cxt_info.p_cxt;
222 core_conn_context_size = sizeof(*p_cxt);
223 p_flags1 = &p_cxt->xstorm_ag_context.flags1;
224 p_flags9 = &p_cxt->xstorm_ag_context.flags9;
225 p_flags10 = &p_cxt->xstorm_ag_context.flags10;
226 p_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0;
227 p_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo;
228 p_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi;
229 p_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr;
231 /* @@@TBD we zero the context until we have ilt_reset implemented. */
232 OSAL_MEM_ZERO(p_cxt, core_conn_context_size);
234 SET_FIELD(*p_flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
235 SET_FIELD(*p_flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
236 SET_FIELD(*p_flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
238 /* CDU validation - FIXME currently disabled */
240 /* QM physical queue */
241 physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
242 *p_physical_q0 = OSAL_CPU_TO_LE16(physical_q);
244 *p_spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr);
245 *p_spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr);
247 DMA_REGPAIR_LE(*p_consolid_base_addr,
248 p_hwfn->p_consq->chain.p_phys_addr);
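/* Post a single SPQ element to the chain and ring the doorbell. The
 * 'echo' stamped into the ramrod header is the chain producer index at
 * post time; ecore_spq_completion() later uses it to locate the matching
 * entry on the completion_pending list.
 */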
251 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
252 struct ecore_spq *p_spq,
253 struct ecore_spq_entry *p_ent)
255 struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
256 struct core_db_data *p_db_data = &p_spq->db_data;
257 u16 echo = ecore_chain_get_prod_idx(p_chain);
258 struct slow_path_element *elem;
260 p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
261 elem = ecore_chain_produce(p_chain);
263 DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
267 *elem = p_ent->elem; /* Struct assignment */
269 p_db_data->spq_prod =
270 OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
272 /* Make sure the SPQE is updated before the doorbell */
273 OSAL_WMB(p_hwfn->p_dev);
275 DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
277 /* Make sure the doorbell is rung */
278 OSAL_WMB(p_hwfn->p_dev);
280 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
281 "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
282 " agg_params: %02x, prod: %04x\n",
283 p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
284 p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
286 return ECORE_SUCCESS;
289 /***************************************************************************
290 * Asynchronous events
291 ***************************************************************************/
293 static enum _ecore_status_t
294 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
295 struct event_ring_entry *p_eqe)
297 ecore_spq_async_comp_cb cb;
298 enum _ecore_status_t rc;
300 if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
301 DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
305 cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
308 true, "Unknown Async completion for protocol: %d\n",
313 rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
314 &p_eqe->data, p_eqe->fw_return_code);
315 if (rc != ECORE_SUCCESS)
316 DP_NOTICE(p_hwfn, true,
317 "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]",
318 rc, p_eqe->opcode, p_eqe->echo,
319 p_eqe->fw_return_code);
325 ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
326 enum protocol_type protocol_id,
327 ecore_spq_async_comp_cb cb)
329 if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
332 p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
333 return ECORE_SUCCESS;
337 ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
338 enum protocol_type protocol_id)
340 if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
343 p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
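/* Illustrative (hypothetical) usage - a protocol module registers its
 * handler once during init and removes it on teardown:
 *
 *	static enum _ecore_status_t
 *	my_proto_async_cb(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo,
 *			  union event_ring_data *data, u8 fw_return_code)
 *	{
 *		...handle the async event...
 *		return ECORE_SUCCESS;
 *	}
 *
 *	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ETH, my_proto_async_cb);
 *	...
 *	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ETH);
 *
 * The callback parameters mirror the invocation in
 * ecore_async_event_completion() above.
 */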
346 /***************************************************************************
348 ***************************************************************************/
349 void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
351 u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
352 USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
354 REG_WR16(p_hwfn, addr, prod);
356 /* keep prod updates ordered */
357 OSAL_MMIOWB(p_hwfn->p_dev);
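/* EQ completion handler, registered on the slow-path status block by
 * ecore_eq_alloc() below. It walks the EQ chain up to the FW consumer
 * index, dispatches async events to the per-protocol callbacks and
 * slow-path ramrod completions to ecore_spq_completion(), and finally
 * publishes the updated producer value back to the firmware.
 */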
360 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
363 struct ecore_eq *p_eq = cookie;
364 struct ecore_chain *p_chain = &p_eq->chain;
366 enum _ecore_status_t rc = ECORE_SUCCESS;
368 if (!p_hwfn->p_spq) {
369 DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
373 /* take a snapshot of the FW consumer */
374 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
376 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
378 /* Need to guarantee the fw_cons index we use points to a usable
379 * element (to comply with our chain), so our macros would comply
381 if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
382 ecore_chain_get_usable_per_page(p_chain)) {
383 fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
386 /* Complete current segment of eq entries */
387 while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
388 struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
391 "Unexpected NULL chain consumer entry\n");
395 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
396 "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
397 p_eqe->opcode, /* Event Opcode */
398 p_eqe->protocol_id, /* Event Protocol ID */
399 p_eqe->reserved0, /* Reserved */
400 /* Echo value from ramrod data on the host */
401 OSAL_LE16_TO_CPU(p_eqe->echo),
402 p_eqe->fw_return_code, /* FW return code for SP
407 if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC))
408 ecore_async_event_completion(p_hwfn, p_eqe);
410 ecore_spq_completion(p_hwfn,
412 p_eqe->fw_return_code,
415 ecore_chain_recycle_consumed(p_chain);
418 ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
423 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
425 struct ecore_eq *p_eq;
427 /* Allocate EQ struct */
428 p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
430 DP_NOTICE(p_hwfn, false,
431 "Failed to allocate `struct ecore_eq'\n");
435 /* Allocate and initialize EQ chain */
436 if (ecore_chain_alloc(p_hwfn->p_dev,
437 ECORE_CHAIN_USE_TO_PRODUCE,
438 ECORE_CHAIN_MODE_PBL,
439 ECORE_CHAIN_CNT_TYPE_U16,
441 sizeof(union event_ring_element),
442 &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
443 DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
444 goto eq_allocate_fail;
447 /* register EQ completion on the SP SB */
448 ecore_int_register_cb(p_hwfn, ecore_eq_completion,
449 p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
452 return ECORE_SUCCESS;
455 OSAL_FREE(p_hwfn->p_dev, p_eq);
459 void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
461 ecore_chain_reset(&p_hwfn->p_eq->chain);
464 void ecore_eq_free(struct ecore_hwfn *p_hwfn)
469 ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
471 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
472 p_hwfn->p_eq = OSAL_NULL;
475 /***************************************************************************
476 * CQE API - manipulate EQ functionality
477 ***************************************************************************/
478 static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
479 struct eth_slow_path_rx_cqe
481 enum protocol_type protocol)
483 if (IS_VF(p_hwfn->p_dev))
484 return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
486 /* @@@tmp - it's possible we'll eventually want to handle some
487 * actual commands that can arrive here, but for now this is only
488 * used to complete the ramrod using the echo value on the cqe
490 return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
493 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
494 struct eth_slow_path_rx_cqe *cqe)
496 enum _ecore_status_t rc;
498 rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
500 DP_NOTICE(p_hwfn, true,
501 "Failed to handle RXQ CQE [cmd 0x%02x]\n",
508 /***************************************************************************
509 * Slow hwfn Queue (spq)
510 ***************************************************************************/
511 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
513 struct ecore_spq *p_spq = p_hwfn->p_spq;
514 struct ecore_spq_entry *p_virt = OSAL_NULL;
515 struct core_db_data *p_db_data;
516 void OSAL_IOMEM *db_addr;
517 dma_addr_t p_phys = 0;
519 enum _ecore_status_t rc;
521 OSAL_LIST_INIT(&p_spq->pending);
522 OSAL_LIST_INIT(&p_spq->completion_pending);
523 OSAL_LIST_INIT(&p_spq->free_pool);
524 OSAL_LIST_INIT(&p_spq->unlimited_pending);
525 OSAL_SPIN_LOCK_INIT(&p_spq->lock);
528 p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
529 p_virt = p_spq->p_virt;
531 capacity = ecore_chain_get_capacity(&p_spq->chain);
532 for (i = 0; i < capacity; i++) {
533 DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
535 OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
538 p_phys += sizeof(struct ecore_spq_entry);
542 p_spq->normal_count = 0;
543 p_spq->comp_count = 0;
544 p_spq->comp_sent_count = 0;
545 p_spq->unlimited_pending_count = 0;
547 OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
548 SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
549 p_spq->comp_bitmap_idx = 0;
551 /* SPQ cid, cannot fail */
552 ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
553 ecore_spq_hw_initialize(p_hwfn, p_spq);
555 /* reset the chain itself */
556 ecore_chain_reset(&p_spq->chain);
558 /* Initialize the address/data of the SPQ doorbell */
559 p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
560 p_db_data = &p_spq->db_data;
561 OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
562 SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
563 SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
564 SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
565 DQ_XCM_CORE_SPQ_PROD_CMD);
566 p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
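/* The doorbell data prepared above is reused as-is by ecore_spq_hw_post();
 * only the spq_prod field is refreshed on each post.
 */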
568 /* Register the SPQ doorbell with the doorbell recovery mechanism */
569 db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
570 rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
571 DB_REC_WIDTH_32B, DB_REC_KERNEL);
572 if (rc != ECORE_SUCCESS)
574 "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
577 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
579 struct ecore_spq_entry *p_virt = OSAL_NULL;
580 struct ecore_spq *p_spq = OSAL_NULL;
581 dma_addr_t p_phys = 0;
586 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
588 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
593 if (ecore_chain_alloc(p_hwfn->p_dev,
594 ECORE_CHAIN_USE_TO_PRODUCE,
595 ECORE_CHAIN_MODE_SINGLE,
596 ECORE_CHAIN_CNT_TYPE_U16,
597 0, /* N/A when the mode is SINGLE */
598 sizeof(struct slow_path_element),
599 &p_spq->chain, OSAL_NULL)) {
600 DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
601 goto spq_allocate_fail;
604 /* allocate and fill the SPQ elements (incl. ramrod data list) */
605 capacity = ecore_chain_get_capacity(&p_spq->chain);
606 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
608 sizeof(struct ecore_spq_entry));
610 goto spq_allocate_fail;
612 p_spq->p_virt = p_virt;
613 p_spq->p_phys = p_phys;
615 #ifdef CONFIG_ECORE_LOCK_ALLOC
616 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
617 goto spq_allocate_fail;
620 p_hwfn->p_spq = p_spq;
621 return ECORE_SUCCESS;
624 ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
625 OSAL_FREE(p_hwfn->p_dev, p_spq);
629 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
631 struct ecore_spq *p_spq = p_hwfn->p_spq;
632 void OSAL_IOMEM *db_addr;
638 /* Delete the SPQ doorbell from the doorbell recovery mechanism */
639 db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
640 ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
643 capacity = ecore_chain_get_capacity(&p_spq->chain);
644 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
648 sizeof(struct ecore_spq_entry));
651 ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
652 #ifdef CONFIG_ECORE_LOCK_ALLOC
653 OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
656 OSAL_FREE(p_hwfn->p_dev, p_spq);
660 ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
662 struct ecore_spq *p_spq = p_hwfn->p_spq;
663 struct ecore_spq_entry *p_ent = OSAL_NULL;
664 enum _ecore_status_t rc = ECORE_SUCCESS;
666 OSAL_SPIN_LOCK(&p_spq->lock);
668 if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
669 p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
671 DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
675 p_ent->queue = &p_spq->unlimited_pending;
677 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
678 struct ecore_spq_entry, list);
679 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
680 p_ent->queue = &p_spq->pending;
686 OSAL_SPIN_UNLOCK(&p_spq->lock);
690 /* Locked variant; Should be called while the SPQ lock is taken */
691 static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
692 struct ecore_spq_entry *p_ent)
694 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
697 void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
698 struct ecore_spq_entry *p_ent)
700 OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
701 __ecore_spq_return_entry(p_hwfn, p_ent);
702 OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
706 * @brief ecore_spq_add_entry - adds a new entry to the pending
707 * list. Should be called while the lock is held.
709 * Adds an entry to the pending list if there is room (an empty
710 * element is available in the free_pool), or else places the
711 * entry in the unlimited_pending pool.
717 * @return enum _ecore_status_t
719 static enum _ecore_status_t
720 ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
721 struct ecore_spq_entry *p_ent, enum spq_priority priority)
723 struct ecore_spq *p_spq = p_hwfn->p_spq;
725 if (p_ent->queue == &p_spq->unlimited_pending) {
726 if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
727 OSAL_LIST_PUSH_TAIL(&p_ent->list,
728 &p_spq->unlimited_pending);
729 p_spq->unlimited_pending_count++;
731 return ECORE_SUCCESS;
734 struct ecore_spq_entry *p_en2;
736 p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
737 struct ecore_spq_entry,
739 OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
741 /* Copy the ring element physical pointer to the new
742 * entry, since we are about to overwrite the entire ring
743 * entry and don't want to lose the pointer.
745 p_ent->elem.data_ptr = p_en2->elem.data_ptr;
749 /* EBLOCK is responsible for freeing the allocated p_ent */
750 if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
751 OSAL_FREE(p_hwfn->p_dev, p_ent);
757 /* entry is to be placed in 'pending' queue */
759 case ECORE_SPQ_PRIORITY_NORMAL:
760 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
761 p_spq->normal_count++;
763 case ECORE_SPQ_PRIORITY_HIGH:
764 OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
771 return ECORE_SUCCESS;
774 /***************************************************************************
776 ***************************************************************************/
778 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
781 return 0xffffffff; /* illegal */
782 return p_hwfn->p_spq->cid;
785 /***************************************************************************
786 * Posting new Ramrods
787 ***************************************************************************/
789 static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
793 struct ecore_spq *p_spq = p_hwfn->p_spq;
794 enum _ecore_status_t rc;
796 /* TODO - implementation might be wasteful; will always keep room
797 * for an additional high priority ramrod (even if one is already
800 while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
801 !OSAL_LIST_IS_EMPTY(head)) {
802 struct ecore_spq_entry *p_ent =
803 OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
804 if (p_ent != OSAL_NULL) {
806 #pragma warning(suppress : 6011 28182)
808 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
809 OSAL_LIST_PUSH_TAIL(&p_ent->list,
810 &p_spq->completion_pending);
811 p_spq->comp_sent_count++;
813 rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
815 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
816 &p_spq->completion_pending);
817 __ecore_spq_return_entry(p_hwfn, p_ent);
823 return ECORE_SUCCESS;
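/* Move entries from 'unlimited_pending' into 'pending' as free-pool
 * entries become available, then post whatever fits on the chain while
 * keeping room for a high-priority ramrod (SPQ_HIGH_PRI_RESERVE_DEFAULT).
 */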
826 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
828 struct ecore_spq *p_spq = p_hwfn->p_spq;
829 struct ecore_spq_entry *p_ent = OSAL_NULL;
831 while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
832 if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
835 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
836 struct ecore_spq_entry, list);
841 #pragma warning(suppress : 6011)
843 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
845 ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
848 return ecore_spq_post_list(p_hwfn,
849 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
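/* Post a ramrod: fill the entry, queue it on the pending list and try to
 * post it to the hardware. For ECORE_SPQ_MODE_EBLOCK the call also blocks
 * (via ecore_spq_block()) until the FW completion arrives, and only then
 * returns the entry to the free pool (or frees it, if it came from the
 * unlimited pending pool).
 */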
852 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
853 struct ecore_spq_entry *p_ent,
856 enum _ecore_status_t rc = ECORE_SUCCESS;
857 struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
858 bool b_ret_ent = true;
864 DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
868 if (p_hwfn->p_dev->recov_in_prog) {
869 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
870 "Recovery is in progress -> skip spq post"
871 " [cmd %02x protocol %02x]\n",
872 p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
873 /* Return success to let the flows be completed successfully
874 * w/o any error handling.
876 return ECORE_SUCCESS;
879 OSAL_SPIN_LOCK(&p_spq->lock);
881 /* Complete the entry */
882 rc = ecore_spq_fill_entry(p_hwfn, p_ent);
884 /* Check return value after LOCK is taken for cleaner error flow */
888 /* Add the request to the pending queue */
889 rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
893 rc = ecore_spq_pend_post(p_hwfn);
895 /* Since it's possible that the pending post failed for a different
896 * entry [although unlikely], the failed entry was already
897 * dealt with; no need to return it here.
903 OSAL_SPIN_UNLOCK(&p_spq->lock);
905 if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
906 /* For entries in ECORE BLOCK mode, the completion code cannot
907 * perform the necessary cleanup - if it did, we couldn't
908 * access p_ent here to see whether it's successful or not.
909 * Thus, after obtaining the answer, perform the cleanup here.
911 rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
912 p_ent->queue == &p_spq->unlimited_pending);
914 if (p_ent->queue == &p_spq->unlimited_pending) {
915 /* This is an allocated p_ent which does not need to
918 OSAL_FREE(p_hwfn->p_dev, p_ent);
920 /* TBD: handle error flow and remove p_ent from
930 ecore_spq_return_entry(p_hwfn, p_ent);
935 OSAL_SPIN_LOCK(&p_spq->lock);
936 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
937 ecore_chain_return_produced(&p_spq->chain);
940 /* return to the free pool */
942 __ecore_spq_return_entry(p_hwfn, p_ent);
943 OSAL_SPIN_UNLOCK(&p_spq->lock);
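/* Handle a slow-path completion EQE: locate the matching entry on the
 * 'completion_pending' list by its echo value, account for out-of-order
 * completions via the completion bitmap, invoke the entry's completion
 * callback and then attempt to post further pending ramrods.
 */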
948 enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
951 union event_ring_data *p_data)
953 struct ecore_spq *p_spq;
954 struct ecore_spq_entry *p_ent = OSAL_NULL;
955 struct ecore_spq_entry *tmp;
956 struct ecore_spq_entry *found = OSAL_NULL;
957 enum _ecore_status_t rc;
959 p_spq = p_hwfn->p_spq;
961 DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
965 OSAL_SPIN_LOCK(&p_spq->lock);
966 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
968 &p_spq->completion_pending,
969 list, struct ecore_spq_entry) {
970 if (p_ent->elem.hdr.echo == echo) {
971 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
972 &p_spq->completion_pending);
974 /* Avoid overwriting SPQ entries when getting
975 * out-of-order completions, by marking the completions
976 * in a bitmap and increasing the chain consumer only
977 * for the first successive completed entries.
979 SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
980 while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
981 p_spq->comp_bitmap_idx)) {
982 SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
983 p_spq->comp_bitmap_idx);
984 p_spq->comp_bitmap_idx++;
985 ecore_chain_return_produced(&p_spq->chain);
993 /* This is debug output and should be relatively uncommon - it depends
994 * on scenarios which have multiple ramrods sent per PF.
996 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
997 "Got completion for echo %04x - doesn't match"
998 " echo %04x in completion pending list\n",
999 OSAL_LE16_TO_CPU(echo),
1000 OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
1003 /* Release lock before callback, as callback may post
1004 * an additional ramrod.
1006 OSAL_SPIN_UNLOCK(&p_spq->lock);
1009 DP_NOTICE(p_hwfn, true,
1010 "Failed to find an entry this"
1011 " EQE [echo %04x] completes\n",
1012 OSAL_LE16_TO_CPU(echo));
1013 return ECORE_EXISTS;
1016 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1017 "Complete EQE [echo %04x]: func %p cookie %p)\n",
1018 OSAL_LE16_TO_CPU(echo),
1019 p_ent->comp_cb.function, p_ent->comp_cb.cookie);
1020 if (found->comp_cb.function)
1021 found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
1024 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1025 "Got a completion without a callback function\n");
1027 if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
1028 (found->queue == &p_spq->unlimited_pending))
1029 /* EBLOCK is responsible for returning its own entry into the
1030 * free list, unless it originally added the entry into the
1031 * unlimited pending list.
1033 ecore_spq_return_entry(p_hwfn, found);
1035 /* Attempt to post pending requests */
1036 OSAL_SPIN_LOCK(&p_spq->lock);
1037 rc = ecore_spq_pend_post(p_hwfn);
1038 OSAL_SPIN_UNLOCK(&p_spq->lock);
1043 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1045 struct ecore_consq *p_consq;
1047 /* Allocate ConsQ struct */
1049 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1051 DP_NOTICE(p_hwfn, false,
1052 "Failed to allocate `struct ecore_consq'\n");
1056 /* Allocate and initialize the ConsQ chain */
1057 if (ecore_chain_alloc(p_hwfn->p_dev,
1058 ECORE_CHAIN_USE_TO_PRODUCE,
1059 ECORE_CHAIN_MODE_PBL,
1060 ECORE_CHAIN_CNT_TYPE_U16,
1061 ECORE_CHAIN_PAGE_SIZE / 0x80,
1063 &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1064 DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
1065 goto consq_allocate_fail;
1068 p_hwfn->p_consq = p_consq;
1069 return ECORE_SUCCESS;
1071 consq_allocate_fail:
1072 OSAL_FREE(p_hwfn->p_dev, p_consq);
1076 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1078 ecore_chain_reset(&p_hwfn->p_consq->chain);
1081 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1083 if (!p_hwfn->p_consq)
1086 ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1087 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);