X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=757436f492a76e111e08c51aa358a0f4fab8515a;hb=0bf68c660e932e76087dc8c87f8b1dacba89c2be;hp=126fc5a82dbd41c8419d3d76103f65bc3993a5e1;hpb=8a9f8564e9f916d469492f5c5a4dc5886e814e05;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 126fc5a82d..757436f492 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -288,8 +288,7 @@ __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
 /* Associate QSBR variable with an LPM object.
  */
 int
-rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
-	struct rte_rcu_qsbr_dq **dq)
+rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
 {
 	struct rte_rcu_qsbr_dq_parameters params = {0};
 	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
@@ -329,8 +328,6 @@ rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
 			RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
 			return 1;
 		}
-		if (dq != NULL)
-			*dq = internal_lpm->dq;
 	} else {
 		rte_errno = EINVAL;
 		return 1;
@@ -535,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
 	return group_idx;
 }
 
-static void
+static int32_t
 tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 {
 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
 	struct __rte_lpm *internal_lpm;
+	int status;
 
 	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
 	if (internal_lpm->v == NULL) {
@@ -555,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 				__ATOMIC_RELAXED);
 	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
 		/* Push into QSBR defer queue. */
-		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+		status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
 				(void *)&tbl8_group_start);
+		if (status == 1) {
+			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
+			return -rte_errno;
+		}
 	}
+
+	return 0;
 }
 
 static __rte_noinline int32_t
@@ -1043,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 #define group_idx next_hop
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
 			tbl8_range, i;
-	int32_t tbl8_recycle_index;
+	int32_t tbl8_recycle_index, status = 0;
 
 	/*
 	 * Calculate the index into tbl24 and range. Note: All depths larger
@@ -1100,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		lpm->tbl24[tbl24_index].valid = 0;
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		status = tbl8_free(lpm, tbl8_group_start);
 	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -1116,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
 				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		status = tbl8_free(lpm, tbl8_group_start);
 	}
 #undef group_idx
-	return 0;
+	return status;
 }
 
 /*
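Taken together, the first two hunks drop the rte_rcu_qsbr_dq out-parameter from rte_lpm_rcu_qsbr_add(): the defer queue becomes purely internal to the LPM object. A minimal writer-side sketch of the new call; the reader-thread count, table sizes and the "example_lpm" name are illustrative assumptions, not part of the patch:

#include <rte_lcore.h>
#include <rte_lpm.h>
#include <rte_malloc.h>
#include <rte_rcu_qsbr.h>

#define MAX_READER_THREADS 4	/* illustrative reader count */

static struct rte_lpm *
create_lpm_with_rcu(void)
{
	struct rte_lpm_config lpm_cfg = {
		.max_rules = 1024,
		.number_tbl8s = 256,
	};
	struct rte_lpm_rcu_config rcu_cfg = {0};
	struct rte_rcu_qsbr *qsv = NULL;
	struct rte_lpm *lpm;

	lpm = rte_lpm_create("example_lpm", rte_socket_id(), &lpm_cfg);
	if (lpm == NULL)
		return NULL;

	/* Allocate and initialize the QSBR variable shared with readers. */
	qsv = rte_zmalloc(NULL, rte_rcu_qsbr_get_memsize(MAX_READER_THREADS),
			RTE_CACHE_LINE_SIZE);
	if (qsv == NULL || rte_rcu_qsbr_init(qsv, MAX_READER_THREADS) != 0)
		goto fail;

	/* Defer-queue mode: freed tbl8 groups are reclaimed once readers
	 * pass a quiescent state. After this patch the queue handle is no
	 * longer reported back to the caller.
	 */
	rcu_cfg.v = qsv;
	rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
		goto fail;

	return lpm;

fail:
	rte_lpm_free(lpm);
	rte_free(qsv);
	return NULL;
}

Leaving rcu_cfg.dq_size and rcu_cfg.reclaim_max at zero selects the library defaults (a queue sized to number_tbl8s and an RTE_LPM_RCU_DQ_RECLAIM_MAX reclaim batch).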
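Reclamation through the defer queue only makes progress when reader threads report quiescent states on the QSBR variable passed via rcu_cfg.v. A reader-loop sketch under that assumption; the stop flag, the fixed lookup address and the hit counter are hypothetical scaffolding:

#include <stdbool.h>

#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>

static volatile bool keep_running = true;	/* hypothetical stop flag */

static void
reader_loop(struct rte_lpm *lpm, struct rte_rcu_qsbr *qsv,
		unsigned int thread_id)
{
	uint32_t next_hop;
	uint64_t hits = 0;

	rte_rcu_qsbr_thread_register(qsv, thread_id);
	rte_rcu_qsbr_thread_online(qsv, thread_id);

	while (keep_running) {
		if (rte_lpm_lookup(lpm, RTE_IPV4(10, 0, 0, 1), &next_hop) == 0)
			hits++;	/* route found; forwarding omitted */
		/* Report a quiescent state between lookups so enqueued
		 * tbl8 groups can eventually be reclaimed. */
		rte_rcu_qsbr_quiescent(qsv, thread_id);
	}

	rte_rcu_qsbr_thread_offline(qsv, thread_id);
	rte_rcu_qsbr_thread_unregister(qsv, thread_id);
}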
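The remaining hunks make tbl8_free() return the rte_rcu_qsbr_dq_enqueue() status and have delete_depth_big() pass it through, so rte_lpm_delete() on a route deeper than /24 can now fail with -rte_errno when a freed tbl8 group cannot be pushed to the defer queue, instead of losing the error silently. A caller-side sketch; 10.0.0.0/30 is an arbitrary example route:

#include <stdio.h>

#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_lpm.h>

/* Any depth greater than 24 exercises the tbl8 path changed above. */
static int
remove_example_route(struct rte_lpm *lpm)
{
	int ret = rte_lpm_delete(lpm, RTE_IPV4(10, 0, 0, 0), 30);

	if (ret < 0)
		printf("LPM delete failed: %d (rte_errno %d)\n",
				ret, rte_errno);
	return ret;
}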