return group_idx;
}
-static void
+static int32_t
tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
{
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
struct __rte_lpm *internal_lpm;
+ int status;

internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
if (internal_lpm->v == NULL) {
__ATOMIC_RELAXED);
} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
/* Push into QSBR defer queue. */
- rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+ status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
(void *)&tbl8_group_start);
+ if (status == 1) {
+ RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
+ return -rte_errno;
+ }
}
+
+ return 0;
}
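For reference, rte_rcu_qsbr_dq_enqueue() returns 0 on success and 1 on failure with rte_errno set (for example to ENOSPC when the defer queue is full and no entries could be reclaimed), which is what the new "status == 1" check and the -rte_errno return above rely on. A minimal sketch of the same pattern outside this patch, with a hypothetical helper name and queue handle:

#include <rte_errno.h>
#include <rte_rcu_qsbr.h>

/* Hypothetical helper: enqueue an element for deferred reclamation and
 * convert the library's 0/1 return convention into 0/-errno. */
static int
defer_free(struct rte_rcu_qsbr_dq *dq, void *e)
{
	if (rte_rcu_qsbr_dq_enqueue(dq, e) != 0)
		return -rte_errno;	/* e.g. -EINVAL or -ENOSPC */
	return 0;
}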
static __rte_noinline int32_t
#define group_idx next_hop
uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
tbl8_range, i;
- int32_t tbl8_recycle_index;
+ int32_t tbl8_recycle_index, status = 0;
/*
* Calculate the index into tbl24 and range. Note: All depths larger
* than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
*/
lpm->tbl24[tbl24_index].valid = 0;
__atomic_thread_fence(__ATOMIC_RELEASE);
- tbl8_free(lpm, tbl8_group_start);
+ status = tbl8_free(lpm, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
struct rte_lpm_tbl_entry new_tbl24_entry = {
__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
- tbl8_free(lpm, tbl8_group_start);
+ status = tbl8_free(lpm, tbl8_group_start);
}
#undef group_idx
- return 0;
+ return status;
}
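With the delete path now forwarding the status from tbl8_free(), a caller of rte_lpm_delete() can see a negative return when the vacated tbl8 group could not be pushed to the QSBR defer queue, in addition to the existing error cases. A sketch of an application-side check under that assumption; the LPM handle, prefix and depth are arbitrary examples:

#include <stdio.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_lpm.h>

/* Sketch: "lpm" is assumed to be attached to an RCU QSBR variable with
 * RTE_LPM_QSBR_MODE_DQ via rte_lpm_rcu_qsbr_add(). Depths above 24 take
 * the tbl8 path patched above. */
static void
remove_route(struct rte_lpm *lpm)
{
	int ret = rte_lpm_delete(lpm, RTE_IPV4(192, 168, 0, 0), 26);

	if (ret < 0)
		printf("rte_lpm_delete: %s\n", rte_strerror(-ret));
}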
/*