lpm: hide defer queue handle from user
[dpdk.git] / lib / librte_lpm / rte_lpm.c
index 126fc5a..757436f 100644 (file)
@@ -288,8 +288,7 @@ __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
 /* Associate QSBR variable with an LPM object.
  */
 int
-rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
-       struct rte_rcu_qsbr_dq **dq)
+rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
 {
        struct rte_rcu_qsbr_dq_parameters params = {0};
        char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
@@ -329,8 +328,6 @@ rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
                        RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
                        return 1;
                }
-               if (dq != NULL)
-                       *dq = internal_lpm->dq;
        } else {
                rte_errno = EINVAL;
                return 1;
@@ -535,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
        return group_idx;
 }
 
-static void
+static int32_t
 tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 {
        struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
        struct __rte_lpm *internal_lpm;
+       int status;
 
        internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
        if (internal_lpm->v == NULL) {
@@ -555,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
                                __ATOMIC_RELAXED);
        } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
                /* Push into QSBR defer queue. */
-               rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+               status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
                                (void *)&tbl8_group_start);
+               if (status == 1) {
+                       RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
+                       return -rte_errno;
+               }
        }
+
+       return 0;
 }
 
 static __rte_noinline int32_t
@@ -1043,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 #define group_idx next_hop
        uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
                        tbl8_range, i;
-       int32_t tbl8_recycle_index;
+       int32_t tbl8_recycle_index, status = 0;
 
        /*
         * Calculate the index into tbl24 and range. Note: All depths larger
@@ -1100,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
                 */
                lpm->tbl24[tbl24_index].valid = 0;
                __atomic_thread_fence(__ATOMIC_RELEASE);
-               tbl8_free(lpm, tbl8_group_start);
+               status = tbl8_free(lpm, tbl8_group_start);
        } else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
                struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -1116,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
                __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
                                __ATOMIC_RELAXED);
                __atomic_thread_fence(__ATOMIC_RELEASE);
-               tbl8_free(lpm, tbl8_group_start);
+               status = tbl8_free(lpm, tbl8_group_start);
        }
 #undef group_idx
-       return 0;
+       return status;
 }
 
 /*