net/qede/base: semantic changes
[dpdk.git] / drivers / net / qede / base / ecore_spq.c
index b263693..70ffa8c 100644 (file)
  ***************************************************************************/
 
 #define SPQ_HIGH_PRI_RESERVE_DEFAULT   (1)
-#define SPQ_BLOCK_SLEEP_LENGTH         (1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER       (10)
+#define SPQ_BLOCK_DELAY_US             (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER       (1000)
+#define SPQ_BLOCK_SLEEP_MS             (5)
 
 /***************************************************************************
  * Blocking Imp. (BLOCK/EBLOCK mode)
  ***************************************************************************/
-static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
-                                 void *cookie,
-                                 union event_ring_data *data,
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+                                 union event_ring_data OSAL_UNUSED * data,
                                  u8 fw_return_code)
 {
        struct ecore_spq_comp_done *comp_done;
@@ -48,53 +51,82 @@ static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
        OSAL_SMP_WMB(p_hwfn->p_dev);
 }
 
-static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
-                                           struct ecore_spq_entry *p_ent,
-                                           u8 *p_fw_ret)
+static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_spq_entry *p_ent,
+                                             u8 *p_fw_ret,
+                                             bool sleep_between_iter)
 {
-       int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct ecore_spq_comp_done *comp_done;
-       enum _ecore_status_t rc;
+       u32 iter_cnt;
 
        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
-       while (sleep_count) {
+       iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+                                     : SPQ_BLOCK_DELAY_MAX_ITER;
+
+       while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
-               /* validate we receive completion update */
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }
-               OSAL_MSLEEP(5);
-               sleep_count--;
+
+               if (sleep_between_iter)
+                       OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
+               else
+                       OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
        }
 
+       return ECORE_TIMEOUT;
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_spq_entry *p_ent,
+                                           u8 *p_fw_ret, bool skip_quick_poll)
+{
+       struct ecore_spq_comp_done *comp_done;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc;
+
+       /* A relatively short polling period w/o sleeping, to allow the FW to
+        * complete the ramrod and thus possibly to avoid the following sleeps.
+        */
+       if (!skip_quick_poll) {
+               rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+               if (rc == ECORE_SUCCESS)
+                       return ECORE_SUCCESS;
+       }
+
+       /* Move to polling with a sleeping period between iterations */
+       rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+       if (rc == ECORE_SUCCESS)
+               return ECORE_SUCCESS;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_AGAIN;
+
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
-       rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
-       if (rc != ECORE_SUCCESS)
+       rc = ecore_mcp_drain(p_hwfn, p_ptt);
+       ecore_ptt_release(p_hwfn, p_ptt);
+       if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+               goto err;
+       }
 
        /* Retry after drain */
-       sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
-       while (sleep_count) {
-               /* validate we receive completion update */
-               OSAL_SMP_RMB(p_hwfn->p_dev);
-               if (comp_done->done == 1) {
-                       if (p_fw_ret)
-                               *p_fw_ret = comp_done->fw_return_code;
-                       return ECORE_SUCCESS;
-               }
-               OSAL_MSLEEP(5);
-               sleep_count--;
-       }
+       rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+       if (rc == ECORE_SUCCESS)
+               return ECORE_SUCCESS;
 
+       comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }
-
+err:
        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
@@ -146,10 +178,9 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
 {
-       u16 pq;
+       struct e4_core_conn_context *p_cxt;
        struct ecore_cxt_info cxt_info;
-       struct core_conn_context *p_cxt;
-       union ecore_qm_pq_params pq_params;
+       u16 physical_q;
        enum _ecore_status_t rc;
 
        cxt_info.iid = p_spq->cid;
@@ -157,40 +188,41 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
 
        if (rc < 0) {
-               DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
+               DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }
 
        p_cxt = cxt_info.p_cxt;
 
-       SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
-       SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
-       /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-        *           XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
-        */
-       SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-                 XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+       /* @@@TBD we zero the context until we have ilt_reset implemented. */
+       OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
+
+       if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
+               SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+                         E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+               SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+                         E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+               /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+                *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+                */
+               SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+                         E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+       }
 
        /* CDU validation - FIXME currently disabled */
 
        /* QM physical queue */
-       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = LB_TC;
-       pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-       p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+       physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+       p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
 
        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);
 
-       p_cxt->xstorm_st_context.consolid_base_addr.lo =
-           DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
-       p_cxt->xstorm_st_context.consolid_base_addr.hi =
-           DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+       DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+                      p_hwfn->p_consq->chain.p_phys_addr);
 }
 
 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
@@ -198,9 +230,9 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent)
 {
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+       struct core_db_data *p_db_data = &p_spq->db_data;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
-       struct core_db_data db;
 
        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
@@ -209,34 +241,24 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                return ECORE_INVAL;
        }
 
-       *elem = p_ent->elem;    /* struct assignment */
-
-       /* send a doorbell on the slow hwfn session */
-       OSAL_MEMSET(&db, 0, sizeof(db));
-       SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
-       SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
-       SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
-                 DQ_XCM_CORE_SPQ_PROD_CMD);
-       db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+       *elem = p_ent->elem;    /* Struct assignment */
 
-       /* validate producer is up to-date */
-       OSAL_RMB(p_hwfn->p_dev);
+       p_db_data->spq_prod =
+               OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
 
-       db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+       /* Make sure the SPQE is updated before the doorbell */
+       OSAL_WMB(p_hwfn->p_dev);
 
-       /* do not reorder */
-       OSAL_BARRIER(p_hwfn->p_dev);
+       DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 
-       DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
-
-       /* make sure doorbell is rang */
-       OSAL_MMIOWB(p_hwfn->p_dev);
+       /* Make sure doorbell is rang */
+       OSAL_WMB(p_hwfn->p_dev);
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
-                  DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
-                  db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+                  p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
+                  p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
 
        return ECORE_SUCCESS;
 }
@@ -249,12 +271,16 @@ static enum _ecore_status_t
 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
 {
-       switch (p_eqe->protocol_id) {
-       case PROTOCOLID_COMMON:
-               return ecore_sriov_eqe_event(p_hwfn,
-                                            p_eqe->opcode,
-                                            p_eqe->echo, &p_eqe->data);
-       default:
+       ecore_spq_async_comp_cb cb;
+
+       if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+               return ECORE_INVAL;
+
+       cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+       if (cb) {
+               return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+                         &p_eqe->data, p_eqe->fw_return_code);
+       } else {
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
@@ -262,6 +288,28 @@ ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
        }
 }
 
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+                           enum protocol_type protocol_id,
+                           ecore_spq_async_comp_cb cb)
+{
+       if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+               return ECORE_INVAL;
+
+       p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+       return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+                             enum protocol_type protocol_id)
+{
+       if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+               return;
+
+       p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
+}
+
 /***************************************************************************
  * EQ API
  ***************************************************************************/
@@ -305,14 +353,15 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                }
 
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
-                               "op %x prot %x res0 %x echo %x "
-                               "fwret %x flags %x\n", p_eqe->opcode,
+                          "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+                          p_eqe->opcode,            /* Event Opcode */
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
+                          /* Echo value from ramrod data on the host */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
-                          p_eqe->fw_return_code,       /* FW return code for SP
-                                                        * ramrods
-                                                        */
+                          p_eqe->fw_return_code,    /* FW return code for SP
+                                                     * ramrods
+                                                     */
                           p_eqe->flags);
 
                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
@@ -333,52 +382,56 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
        return rc;
 }
 
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
 {
        struct ecore_eq *p_eq;
 
        /* Allocate EQ struct */
-       p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
+       p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_eq'\n");
-               return OSAL_NULL;
+               return ECORE_NOMEM;
        }
 
-       /* Allocate and initialize EQ chain */
+       /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
-                             sizeof(union event_ring_element), &p_eq->chain)) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
+                             sizeof(union event_ring_element),
+                             &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }
 
        /* register EQ completion on the SP SB */
-       ecore_int_register_cb(p_hwfn,
-                             ecore_eq_completion,
+       ecore_int_register_cb(p_hwfn, ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 
-       return p_eq;
+       p_hwfn->p_eq = p_eq;
+       return ECORE_SUCCESS;
 
 eq_allocate_fail:
-       ecore_eq_free(p_hwfn, p_eq);
-       return OSAL_NULL;
+       OSAL_FREE(p_hwfn->p_dev, p_eq);
+       return ECORE_NOMEM;
 }
 
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
 {
-       ecore_chain_reset(&p_eq->chain);
+       ecore_chain_reset(&p_hwfn->p_eq->chain);
 }
 
-void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_free(struct ecore_hwfn *p_hwfn)
 {
-       if (!p_eq)
+       if (!p_hwfn->p_eq)
                return;
-       ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
-       OSAL_FREE(p_hwfn->p_dev, p_eq);
+
+       ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
+
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
+       p_hwfn->p_eq = OSAL_NULL;
 }
 
 /***************************************************************************
@@ -419,10 +472,13 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
  ***************************************************************************/
 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 {
-       struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = p_hwfn->p_spq;
+       struct ecore_spq_entry *p_virt = OSAL_NULL;
+       struct core_db_data *p_db_data;
+       void OSAL_IOMEM *db_addr;
        dma_addr_t p_phys = 0;
        u32 i, capacity;
+       enum _ecore_status_t rc;
 
        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
@@ -436,8 +492,7 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
-               p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
-               p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+               DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 
                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
 
@@ -461,6 +516,24 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 
        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);
+
+       /* Initialize the address/data of the SPQ doorbell */
+       p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
+       p_db_data = &p_spq->db_data;
+       OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
+       SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+       SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_CORE_SPQ_PROD_CMD);
+       p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+       /* Register the SPQ doorbell with the doorbell recovery mechanism */
+       db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+       rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
+                                  DB_REC_WIDTH_32B, DB_REC_KERNEL);
+       if (rc != ECORE_SUCCESS)
+               DP_INFO(p_hwfn,
+                       "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 }
 
 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
@@ -475,16 +548,19 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, true,
-                         "Failed to allocate `struct ecore_spq'");
+                         "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }
 
        /* SPQ ring  */
-       if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
-                       ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
-                       /* N/A when the mode is SINGLE */
-                       sizeof(struct slow_path_element), &p_spq->chain)) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
+       if (ecore_chain_alloc(p_hwfn->p_dev,
+                             ECORE_CHAIN_USE_TO_PRODUCE,
+                             ECORE_CHAIN_MODE_SINGLE,
+                             ECORE_CHAIN_CNT_TYPE_U16,
+                             0, /* N/A when the mode is SINGLE */
+                             sizeof(struct slow_path_element),
+                             &p_spq->chain, OSAL_NULL)) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }
 
@@ -499,7 +575,9 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
 
+#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+#endif
 
        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;
@@ -513,11 +591,16 @@ spq_allocate_fail:
 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq *p_spq = p_hwfn->p_spq;
+       void OSAL_IOMEM *db_addr;
        u32 capacity;
 
        if (!p_spq)
                return;
 
+       /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+       db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+       ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
+
        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
@@ -528,7 +611,10 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn)
        }
 
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+#endif
+
        OSAL_FREE(p_hwfn->p_dev, p_spq);
 }
 
@@ -537,18 +623,18 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
 {
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
 
        OSAL_SPIN_LOCK(&p_spq->lock);
 
        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
-               p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
-                                   sizeof(struct ecore_spq_entry));
+               p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
-                       OSAL_SPIN_UNLOCK(&p_spq->lock);
                        DP_NOTICE(p_hwfn, true,
-                                 "Failed to allocate an SPQ entry"
-                                 " for a pending ramrod\n");
-                       return ECORE_NOMEM;
+                                "Failed to allocate an SPQ entry for a pending"
+                                " ramrod\n");
+                       rc = ECORE_NOMEM;
+                       goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
@@ -560,9 +646,9 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
 
        *pp_ent = p_ent;
 
+out_unlock:
        OSAL_SPIN_UNLOCK(&p_spq->lock);
-
-       return ECORE_SUCCESS;
+       return rc;
 }
 
 /* Locked variant; Should be called while the SPQ lock is taken */
@@ -607,32 +693,29 @@ ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                        p_spq->unlimited_pending_count++;
 
                        return ECORE_SUCCESS;
-               }
-
-               struct ecore_spq_entry *p_en2;
 
-               p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
-                                             struct ecore_spq_entry,
-                                             list);
-               OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+               } else {
+                       struct ecore_spq_entry *p_en2;
 
-               /* Copy the ring element physical pointer to the new
-                * entry, since we are about to override the entire ring
-                * entry and don't want to lose the pointer.
-                */
-               p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+                       p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+                                                    struct ecore_spq_entry,
+                                                    list);
+                       OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
 
-               /* Setting the cookie to the comp_done of the
-                * new element.
-                */
-               if (p_ent->comp_cb.cookie == &p_ent->comp_done)
-                       p_ent->comp_cb.cookie = &p_en2->comp_done;
+                       /* Copy the ring element physical pointer to the new
+                        * entry, since we are about to override the entire ring
+                        * entry and don't want to lose the pointer.
+                        */
+                       p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 
-               *p_en2 = *p_ent;
+                       *p_en2 = *p_ent;
 
-               OSAL_FREE(p_hwfn->p_dev, p_ent);
+                       /* EBLOCK responsible to free the allocated p_ent */
+                       if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
+                               OSAL_FREE(p_hwfn->p_dev, p_ent);
 
-               p_ent = p_en2;
+                       p_ent = p_en2;
+               }
        }
 
        /* entry is to be placed in 'pending' queue */
@@ -682,16 +765,22 @@ static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
-               OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
-               OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
-               p_spq->comp_sent_count++;
-
-               rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
-               if (rc) {
-                       OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
-                                              &p_spq->completion_pending);
-                       __ecore_spq_return_entry(p_hwfn, p_ent);
-                       return rc;
+               if (p_ent != OSAL_NULL) {
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011 28182)
+#endif
+                       OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+                       OSAL_LIST_PUSH_TAIL(&p_ent->list,
+                                           &p_spq->completion_pending);
+                       p_spq->comp_sent_count++;
+
+                       rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+                       if (rc) {
+                               OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+                                                   &p_spq->completion_pending);
+                               __ecore_spq_return_entry(p_hwfn, p_ent);
+                               return rc;
+                       }
                }
        }
 
@@ -700,7 +789,6 @@ static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
 
 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
 {
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
 
@@ -713,17 +801,16 @@ static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
                if (!p_ent)
                        return ECORE_INVAL;
 
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011)
+#endif
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
 
                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }
 
-       rc = ecore_spq_post_list(p_hwfn,
+       return ecore_spq_post_list(p_hwfn,
                                 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
-       if (rc)
-               return rc;
-
-       return ECORE_SUCCESS;
 }
 
 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
@@ -745,7 +832,7 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
-                          " [cmd %02x protocol %02x]",
+                          " [cmd %02x protocol %02x]\n",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flows to be completed successfully
                 * w/o any error handling.
@@ -785,7 +872,21 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
-               rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
+               rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
+                                    p_ent->queue == &p_spq->unlimited_pending);
+
+               if (p_ent->queue == &p_spq->unlimited_pending) {
+                       /* This is an allocated p_ent which does not need to
+                        * return to pool.
+                        */
+                       OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+                       /* TBD: handle error flow and remove p_ent from
+                        * completion pending
+                        */
+                       return rc;
+               }
+
                if (rc)
                        goto spq_post_fail2;
 
@@ -884,11 +985,17 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
+       else
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                          "Got a completion without a callback function\n");
 
-       if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
-               /* EBLOCK is responsible for freeing its own entry */
+       if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
+           (found->queue == &p_spq->unlimited_pending))
+               /* EBLOCK is responsible for returning its own entry into the
+                * free list, unless it originally added the entry into the
+                * unlimited pending list.
+                */
                ecore_spq_return_entry(p_hwfn, found);
-       }
 
        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
@@ -898,17 +1005,17 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
        return rc;
 }
 
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_consq *p_consq;
 
        /* Allocate ConsQ struct */
        p_consq =
-           OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
+           OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_consq'\n");
-               return OSAL_NULL;
+               return ECORE_NOMEM;
        }
 
        /* Allocate and initialize EQ chain */
@@ -917,27 +1024,30 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
-                             0x80, &p_consq->chain)) {
+                             0x80,
+                             &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }
 
-       return p_consq;
+       p_hwfn->p_consq = p_consq;
+       return ECORE_SUCCESS;
 
 consq_allocate_fail:
-       ecore_consq_free(p_hwfn, p_consq);
-       return OSAL_NULL;
+       OSAL_FREE(p_hwfn->p_dev, p_consq);
+       return ECORE_NOMEM;
 }
 
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
 {
-       ecore_chain_reset(&p_consq->chain);
+       ecore_chain_reset(&p_hwfn->p_consq->chain);
 }
 
-void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_free(struct ecore_hwfn *p_hwfn)
 {
-       if (!p_consq)
+       if (!p_hwfn->p_consq)
                return;
-       ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
-       OSAL_FREE(p_hwfn->p_dev, p_consq);
+
+       ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
 }