void *hwrm_short_cmd_req_addr;
rte_iova_t hwrm_short_cmd_req_dma_addr;
rte_spinlock_t hwrm_lock;
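+ /* Synchronizes access to the default/async completion ring. */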
+ pthread_mutex_t def_cp_lock;
uint16_t max_req_len;
uint16_t max_resp_len;
uint16_t hwrm_max_ext_req_len;
* resource reservation. This will ensure the resource counts
* are calculated correctly.
*/
+
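+ /* Hold def_cp_lock so the async completion ring is not
+  * serviced while it is freed and reallocated below.
+  */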
+ pthread_mutex_lock(&bp->def_cp_lock);
+
if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
bnxt_disable_int(bp);
bnxt_free_cp_ring(bp, bp->async_cp_ring);
}
rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+ pthread_mutex_unlock(&bp->def_cp_lock);
return -ENOSPC;
}
if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
rc = bnxt_alloc_async_cp_ring(bp);
- if (rc)
+ if (rc) {
+ pthread_mutex_unlock(&bp->def_cp_lock);
return rc;
+ }
bnxt_enable_int(bp);
}
+
+ pthread_mutex_unlock(&bp->def_cp_lock);
} else {
/* legacy driver needs to get updated values */
rc = bnxt_hwrm_func_qcaps(bp);
int err;
err = pthread_mutex_init(&bp->flow_lock, NULL);
- if (err)
+ if (err) {
PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
+ return err;
+ }
+
+ err = pthread_mutex_init(&bp->def_cp_lock, NULL);
+ if (err)
+ PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
return err;
}
bnxt_uninit_locks(struct bnxt *bp)
{
pthread_mutex_destroy(&bp->flow_lock);
+ pthread_mutex_destroy(&bp->def_cp_lock);
}
static int
return;
raw_cons = cpr->cp_raw_cons;
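+ /* def_cp_lock keeps the completion ring from being freed and
+  * reallocated while events are processed here.
+  */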
+ pthread_mutex_lock(&bp->def_cp_lock);
while (1) {
- if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell)
+ if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) {
+ pthread_mutex_unlock(&bp->def_cp_lock);
return;
+ }
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
cmp = &cpr->cp_desc_ring[cons];
if (BNXT_HAS_NQ(bp))
bnxt_db_nq_arm(cpr);
else
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+
+ pthread_mutex_unlock(&bp->def_cp_lock);
}
int bnxt_free_int(struct bnxt *bp)