lpm: report error when defer queue overflows
author Ruifeng Wang <ruifeng.wang@arm.com>
Thu, 16 Jul 2020 15:49:19 +0000 (23:49 +0800)
committer David Marchand <david.marchand@redhat.com>
Tue, 21 Jul 2020 18:48:40 +0000 (20:48 +0200)
Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue().
By default, the defer queue is big enough to hold all tbl8 groups, so an enqueue
failure indicates a system issue. Return an error to the user in that case.

Coverity issue: 360832
Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
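
With this change a failed enqueue propagates out of rte_lpm_delete() as a
negative return value with rte_errno set. A minimal caller-side sketch of how
an application might surface it (the helper below is hypothetical and not part
of this patch):

	#include <rte_lpm.h>
	#include <rte_errno.h>
	#include <rte_log.h>

	/* Hypothetical helper: delete a rule and report any failure,
	 * including a defer queue overflow, to the caller. */
	static int
	lpm_rule_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
	{
		int ret = rte_lpm_delete(lpm, ip, depth);

		if (ret < 0)
			/* With this patch, a failed rte_rcu_qsbr_dq_enqueue()
			 * shows up here as a negative return, rte_errno set. */
			RTE_LOG(ERR, LPM, "LPM rule delete failed: %s\n",
				rte_strerror(rte_errno));
		return ret;
	}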
lib/librte_lpm/rte_lpm.c

index 2db9e16..757436f 100644
@@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
        return group_idx;
 }
 
-static void
+static int32_t
 tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 {
        struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
        struct __rte_lpm *internal_lpm;
+       int status;
 
        internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
        if (internal_lpm->v == NULL) {
@@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
                                __ATOMIC_RELAXED);
        } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
                /* Push into QSBR defer queue. */
-               rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+               status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
                                (void *)&tbl8_group_start);
+               if (status == 1) {
+                       RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
+                       return -rte_errno;
+               }
        }
+
+       return 0;
 }
 
 static __rte_noinline int32_t
@@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 #define group_idx next_hop
        uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
                        tbl8_range, i;
-       int32_t tbl8_recycle_index;
+       int32_t tbl8_recycle_index, status = 0;
 
        /*
         * Calculate the index into tbl24 and range. Note: All depths larger
@@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
                 */
                lpm->tbl24[tbl24_index].valid = 0;
                __atomic_thread_fence(__ATOMIC_RELEASE);
-               tbl8_free(lpm, tbl8_group_start);
+               status = tbl8_free(lpm, tbl8_group_start);
        } else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
                struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -1113,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
                __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
                                __ATOMIC_RELAXED);
                __atomic_thread_fence(__ATOMIC_RELEASE);
-               tbl8_free(lpm, tbl8_group_start);
+               status = tbl8_free(lpm, tbl8_group_start);
        }
 #undef group_idx
-       return 0;
+       return status;
 }
 
 /*
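
For context on the "big enough to hold all tbl8 groups" default mentioned in
the description: the defer queue is sized when RCU is attached to the LPM
table. A hedged sketch of that setup with an explicit dq_size for illustration
(the helper name and values are not from this patch; field names follow the
RCU reclamation patch this one fixes):

	#include <rte_lpm.h>
	#include <rte_rcu_qsbr.h>

	/* Illustrative RCU setup using the defer queue reclamation mode.
	 * With dq_size left at 0 the library falls back to its default,
	 * which holds all tbl8 groups, so overflow is not expected in
	 * normal operation. */
	static int
	lpm_attach_rcu(struct rte_lpm *lpm, struct rte_rcu_qsbr *v,
			uint32_t number_tbl8s)
	{
		struct rte_lpm_rcu_config cfg = {
			.v = v,
			.mode = RTE_LPM_QSBR_MODE_DQ,	/* reclaim via defer queue */
			.dq_size = number_tbl8s,	/* one slot per tbl8 group */
		};

		return rte_lpm_rcu_qsbr_add(lpm, &cfg);
	}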