net/qede/base: update FW to 8.40.25.0
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 3c1d05b..6c38682 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bcm_osal.h"
 
 #define SPQ_BLOCK_DELAY_MAX_ITER       (10)
 #define SPQ_BLOCK_DELAY_US             (10)
-#define SPQ_BLOCK_SLEEP_MAX_ITER       (1000)
+#define SPQ_BLOCK_SLEEP_MAX_ITER       (200)
 #define SPQ_BLOCK_SLEEP_MS             (5)
 
 /***************************************************************************
  * Blocking Imp. (BLOCK/EBLOCK mode)
  ***************************************************************************/
-static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
-                                 void *cookie,
-                                 union event_ring_data *data,
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+                                 union event_ring_data OSAL_UNUSED * data,
                                  u8 fw_return_code)
 {
        struct ecore_spq_comp_done *comp_done;
@@ -61,8 +58,12 @@ static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
        u32 iter_cnt;
 
        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
-       iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+       iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
                                      : SPQ_BLOCK_DELAY_MAX_ITER;
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
+               iter_cnt *= 5;
+#endif
 
        while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
@@ -87,6 +88,7 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            u8 *p_fw_ret, bool skip_quick_poll)
 {
        struct ecore_spq_comp_done *comp_done;
+       struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;
 
        /* A relatively short polling period w/o sleeping, to allow the FW to
@@ -103,8 +105,13 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;
 
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_AGAIN;
+
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
-       rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+       rc = ecore_mcp_drain(p_hwfn, p_ptt);
+       ecore_ptt_release(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
                goto err;
@@ -133,6 +140,14 @@ err:
        return ECORE_BUSY;
 }
 
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+                                u32 spq_timeout_ms)
+{
+       p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
+               spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
+               SPQ_BLOCK_SLEEP_MAX_ITER;
+}
+
 /***************************************************************************
  * SPQ entries inner API
  ***************************************************************************/
@@ -170,53 +185,66 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
 /***************************************************************************
  * HSI access
  ***************************************************************************/
+
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK                  0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT             7
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT             4
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK       0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT      6
+
 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
 {
-       struct ecore_cxt_info cxt_info;
+       __le32 *p_spq_base_lo, *p_spq_base_hi;
+       struct regpair *p_consolid_base_addr;
+       u8 *p_flags1, *p_flags9, *p_flags10;
        struct core_conn_context *p_cxt;
-       enum _ecore_status_t rc;
+       struct ecore_cxt_info cxt_info;
+       u32 core_conn_context_size;
+       __le16 *p_physical_q0;
        u16 physical_q;
+       enum _ecore_status_t rc;
 
        cxt_info.iid = p_spq->cid;
 
        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
 
-       if (rc < 0) {
+       if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }
 
        p_cxt = cxt_info.p_cxt;
+       core_conn_context_size = sizeof(*p_cxt);
+       p_flags1 = &p_cxt->xstorm_ag_context.flags1;
+       p_flags9 = &p_cxt->xstorm_ag_context.flags9;
+       p_flags10 = &p_cxt->xstorm_ag_context.flags10;
+       p_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0;
+       p_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo;
+       p_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi;
+       p_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr;
 
        /* @@@TBD we zero the context until we have ilt_reset implemented. */
-       OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
-
-       if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
-               SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-                         E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
-               SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-                         E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
-               /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-                *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
-                */
-               SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-                         E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
-       }
+       OSAL_MEM_ZERO(p_cxt, core_conn_context_size);
+
+       SET_FIELD(*p_flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+       SET_FIELD(*p_flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+       SET_FIELD(*p_flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 
        /* CDU validation - FIXME currently disabled */
 
        /* QM physical queue */
        physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
-       p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
+       *p_physical_q0 = OSAL_CPU_TO_LE16(physical_q);
 
-       p_cxt->xstorm_st_context.spq_base_lo =
-           DMA_LO_LE(p_spq->chain.p_phys_addr);
-       p_cxt->xstorm_st_context.spq_base_hi =
-           DMA_HI_LE(p_spq->chain.p_phys_addr);
+       *p_spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr);
+       *p_spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr);
 
-       DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+       DMA_REGPAIR_LE(*p_consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
 }
 
@@ -225,9 +253,9 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent)
 {
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+       struct core_db_data *p_db_data = &p_spq->db_data;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
-       struct core_db_data db;
 
        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
@@ -236,31 +264,24 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                return ECORE_INVAL;
        }
 
-       *elem = p_ent->elem;    /* struct assignment */
+       *elem = p_ent->elem;    /* Struct assignment */
 
-       /* send a doorbell on the slow hwfn session */
-       OSAL_MEMSET(&db, 0, sizeof(db));
-       SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
-       SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
-       SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
-                 DQ_XCM_CORE_SPQ_PROD_CMD);
-       db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-       db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+       p_db_data->spq_prod =
+               OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
 
-       /* make sure the SPQE is updated before the doorbell */
+       /* Make sure the SPQE is updated before the doorbell */
        OSAL_WMB(p_hwfn->p_dev);
 
-       DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
-                *(u32 *)&db);
+       DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 
-       /* make sure doorbell is rang */
+       /* Make sure doorbell is rung */
        OSAL_WMB(p_hwfn->p_dev);
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
-                  DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
-                  db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+                  p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
+                  p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
 
        return ECORE_SUCCESS;
 }
@@ -273,17 +294,53 @@ static enum _ecore_status_t
 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
 {
-       switch (p_eqe->protocol_id) {
-       case PROTOCOLID_COMMON:
-               return ecore_sriov_eqe_event(p_hwfn,
-                                            p_eqe->opcode,
-                                            p_eqe->echo, &p_eqe->data);
-       default:
+       ecore_spq_async_comp_cb cb;
+       enum _ecore_status_t rc;
+
+       if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+               DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
+               return ECORE_INVAL;
+       }
+
+       cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+       if (!cb) {
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
+
+       rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+               &p_eqe->data, p_eqe->fw_return_code);
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_hwfn, true,
+                         "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]",
+                         rc, p_eqe->opcode, p_eqe->echo,
+                         p_eqe->fw_return_code);
+
+       return rc;
+}
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+                           enum protocol_type protocol_id,
+                           ecore_spq_async_comp_cb cb)
+{
+       if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+               return ECORE_INVAL;
+
+       p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+       return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+                             enum protocol_type protocol_id)
+{
+       if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+               return;
+
+       p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
 }
 
 /***************************************************************************
@@ -305,10 +362,16 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
 {
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
-       enum _ecore_status_t rc = 0;
+       u16 fw_cons_idx             = 0;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       if (!p_hwfn->p_spq) {
+               DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
+               return ECORE_INVAL;
+       }
 
        /* take a snapshot of the FW consumer */
-       u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+       fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
 
@@ -324,7 +387,8 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
-                       rc = ECORE_INVAL;
+                       DP_ERR(p_hwfn,
+                              "Unexpected NULL chain consumer entry\n");
                        break;
                }
 
@@ -340,15 +404,13 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                                      */
                           p_eqe->flags);
 
-               if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
-                       if (ecore_async_event_completion(p_hwfn, p_eqe))
-                               rc = ECORE_INVAL;
-               } else if (ecore_spq_completion(p_hwfn,
-                                               p_eqe->echo,
-                                               p_eqe->fw_return_code,
-                                               &p_eqe->data)) {
-                       rc = ECORE_INVAL;
-               }
+               if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC))
+                       ecore_async_event_completion(p_hwfn, p_eqe);
+               else
+                       ecore_spq_completion(p_hwfn,
+                                            p_eqe->echo,
+                                            p_eqe->fw_return_code,
+                                            &p_eqe->data);
 
                ecore_chain_recycle_consumed(p_chain);
        }
@@ -365,7 +427,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
-               DP_NOTICE(p_hwfn, true,
+               DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct ecore_eq'\n");
                return ECORE_NOMEM;
        }
@@ -378,7 +440,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
                              num_elem,
                              sizeof(union event_ring_element),
                              &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }
 
@@ -450,8 +512,11 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_virt = OSAL_NULL;
+       struct core_db_data *p_db_data;
+       void OSAL_IOMEM *db_addr;
        dma_addr_t p_phys = 0;
        u32 i, capacity;
+       enum _ecore_status_t rc;
 
        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
@@ -489,6 +554,24 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 
        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);
+
+       /* Initialize the address/data of the SPQ doorbell */
+       p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
+       p_db_data = &p_spq->db_data;
+       OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
+       SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+       SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_CORE_SPQ_PROD_CMD);
+       p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+       /* Register the SPQ doorbell with the doorbell recovery mechanism */
+       db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+       rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
+                                  DB_REC_WIDTH_32B, DB_REC_KERNEL);
+       if (rc != ECORE_SUCCESS)
+               DP_INFO(p_hwfn,
+                       "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 }
 
 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
@@ -502,8 +585,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
-               DP_NOTICE(p_hwfn, true,
-                         "Failed to allocate `struct ecore_spq'\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }
 
@@ -515,7 +597,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
                              0, /* N/A when the mode is SINGLE */
                              sizeof(struct slow_path_element),
                              &p_spq->chain, OSAL_NULL)) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }
 
@@ -530,7 +612,10 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
 
-       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+       if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
+               goto spq_allocate_fail;
+#endif
 
        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;
@@ -544,11 +629,16 @@ spq_allocate_fail:
 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq *p_spq = p_hwfn->p_spq;
+       void OSAL_IOMEM *db_addr;
        u32 capacity;
 
        if (!p_spq)
                return;
 
+       /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+       db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+       ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
+
        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
@@ -559,7 +649,10 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn)
        }
 
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+#endif
+
        OSAL_FREE(p_hwfn->p_dev, p_spq);
 }
 
@@ -575,9 +668,7 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
-                       DP_NOTICE(p_hwfn, true,
-                                "Failed to allocate an SPQ entry for a pending"
-                                " ramrod\n");
+                       DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = ECORE_NOMEM;
                        goto out_unlock;
                }
@@ -865,12 +956,11 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;
 
-       if (!p_hwfn)
-               return ECORE_INVAL;
-
        p_spq = p_hwfn->p_spq;
-       if (!p_spq)
+       if (!p_spq) {
+               DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
                return ECORE_INVAL;
+       }
 
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
@@ -958,7 +1048,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
        p_consq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
-               DP_NOTICE(p_hwfn, true,
+               DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct ecore_consq'\n");
                return ECORE_NOMEM;
        }
@@ -971,7 +1061,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
                              0x80,
                              &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }