remove extra parentheses in return statement
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 983e04b..cfdf959 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -45,7 +45,6 @@
 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
 #include <rte_malloc.h>
 #include <rte_memzone.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_eal_memconfig.h>
 #include <rte_per_lcore.h>
 
 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
 
+static struct rte_tailq_elem rte_lpm_tailq = {
+       .name = "RTE_LPM",
+};
+EAL_REGISTER_TAILQ(rte_lpm_tailq)
+
 #define MAX_DEPTH_TBL24 24
 
 enum valid_flag {
@@ -109,7 +113,7 @@ depth_to_range(uint8_t depth)
                return 1 << (MAX_DEPTH_TBL24 - depth);
 
        /* Else if depth is greater than 24 */
-       return (1 << (RTE_LPM_MAX_DEPTH - depth));
+       return 1 << (RTE_LPM_MAX_DEPTH - depth);
 }
 
 /*
@@ -122,12 +126,7 @@ rte_lpm_find_existing(const char *name)
        struct rte_tailq_entry *te;
        struct rte_lpm_list *lpm_list;
 
-       /* check that we have an initialised tail queue */
-       if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
-                       rte_lpm_list)) == NULL) {
-               rte_errno = E_RTE_NO_TAILQ;
-               return NULL;
-       }
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, lpm_list, next) {
@@ -158,12 +157,7 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
        uint32_t mem_size;
        struct rte_lpm_list *lpm_list;
 
-       /* check that we have an initialised tail queue */
-       if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
-                       rte_lpm_list)) == NULL) {
-               rte_errno = E_RTE_NO_TAILQ;
-               return NULL;
-       }
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
@@ -233,12 +227,7 @@ rte_lpm_free(struct rte_lpm *lpm)
        if (lpm == NULL)
                return;
 
-       /* check that we have an initialised tail queue */
-       if ((lpm_list =
-            RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
-               rte_errno = E_RTE_NO_TAILQ;
-               return;
-       }
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
 
@@ -298,6 +287,9 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                                return rule_index;
                        }
                }
+
+               if (rule_index == lpm->max_rules)
+                       return -ENOSPC;
        } else {
                /* Calculate the position in which the rule will be stored. */
                rule_index = 0;
@@ -379,11 +371,11 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
        for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
                /* If rule is found return the rule index. */
                if (lpm->rules_tbl[rule_index].ip == ip_masked)
-                       return (rule_index);
+                       return rule_index;
        }
 
-       /* If rule is not found return -E_RTE_NO_TAILQ. */
-       return -E_RTE_NO_TAILQ;
+       /* If rule is not found return -EINVAL. */
+       return -EINVAL;
 }
 
 /*
@@ -450,35 +442,41 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                        };
 
                        /* Setting tbl24 entry in one go to avoid race
-                        * conditions */
+                        * conditions
+                        */
                        lpm->tbl24[i] = new_tbl24_entry;
 
                        continue;
                }
 
-               /* If tbl24 entry is valid and extended calculate the index
-                * into tbl8. */
-               tbl8_index = lpm->tbl24[i].tbl8_gindex *
-                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-               tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
-               for (j = tbl8_index; j < tbl8_group_end; j++) {
-                       if (!lpm->tbl8[j].valid ||
-                                       lpm->tbl8[j].depth <= depth) {
-                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
-                                       .valid = VALID,
-                                       .valid_group = VALID,
-                                       .depth = depth,
-                                       .next_hop = next_hop,
-                               };
-
-                               /*
-                                * Setting tbl8 entry in one go to avoid race
-                                * conditions
-                                */
-                               lpm->tbl8[j] = new_tbl8_entry;
-
-                               continue;
+               if (lpm->tbl24[i].ext_entry == 1) {
+                       /* If tbl24 entry is valid and extended calculate the
+                        * index into tbl8.
+                        */
+                       tbl8_index = lpm->tbl24[i].tbl8_gindex *
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                       tbl8_group_end = tbl8_index +
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                       for (j = tbl8_index; j < tbl8_group_end; j++) {
+                               if (!lpm->tbl8[j].valid ||
+                                               lpm->tbl8[j].depth <= depth) {
+                                       struct rte_lpm_tbl8_entry
+                                               new_tbl8_entry = {
+                                               .valid = VALID,
+                                               .valid_group = VALID,
+                                               .depth = depth,
+                                               .next_hop = next_hop,
+                                       };
+
+                                       /*
+                                        * Setting tbl8 entry in one go to avoid
+                                        * race conditions
+                                        */
+                                       lpm->tbl8[j] = new_tbl8_entry;
+
+                                       continue;
+                               }
                        }
                }
        }
@@ -736,8 +734,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
                        if (lpm->tbl24[i].ext_entry == 0 &&
                                        lpm->tbl24[i].depth <= depth ) {
                                lpm->tbl24[i].valid = INVALID;
-                       }
-                       else {
+                       } else if (lpm->tbl24[i].ext_entry == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
                                 * to be a rule with depth >= 25 in the
@@ -772,6 +769,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 
                struct rte_lpm_tbl8_entry new_tbl8_entry = {
                        .valid = VALID,
+                       .valid_group = VALID,
                        .depth = sub_rule_depth,
                        .next_hop = lpm->rules_tbl
                        [sub_rule_index].next_hop,
@@ -782,8 +780,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
                        if (lpm->tbl24[i].ext_entry == 0 &&
                                        lpm->tbl24[i].depth <= depth ) {
                                lpm->tbl24[i] = new_tbl24_entry;
-                       }
-                       else {
+                       } else if (lpm->tbl24[i].ext_entry == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
                                 * to be a rule with depth >= 25 in the
@@ -967,10 +964,10 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 
        /*
         * Check if rule_to_delete_index was found. If no rule was found the
-        * function rule_find returns -E_RTE_NO_TAILQ.
+        * function rule_find returns -EINVAL.
         */
        if (rule_to_delete_index < 0)
-               return -E_RTE_NO_TAILQ;
+               return -EINVAL;
 
        /* Delete the rule from the rule table. */
        rule_delete(lpm, rule_to_delete_index, depth);
@@ -1014,4 +1011,3 @@ rte_lpm_delete_all(struct rte_lpm *lpm)
        /* Delete all rules form the rules table. */
        memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
-
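
For reference, a minimal sketch (not part of this patch) of how the tailq
registration pattern used above is typically consumed by a library: the tailq
element is registered at init time with EAL_REGISTER_TAILQ, so lookup code can
cast its head with RTE_TAILQ_CAST instead of checking for an uninitialised
queue and returning E_RTE_NO_TAILQ. The "my_obj" names below are hypothetical
and only illustrate the pattern.

#include <errno.h>
#include <string.h>

#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_rwlock.h>
#include <rte_errno.h>

/* Hypothetical object kept on the tailq via rte_tailq_entry->data. */
struct my_obj {
        char name[32];
};

TAILQ_HEAD(my_obj_list, rte_tailq_entry);

/* Register the tailq with EAL; its head is set up during rte_eal_init(). */
static struct rte_tailq_elem my_obj_tailq = {
        .name = "MY_OBJ",
};
EAL_REGISTER_TAILQ(my_obj_tailq)

static struct my_obj *
my_obj_find_existing(const char *name)
{
        struct my_obj *obj = NULL;
        struct rte_tailq_entry *te;
        struct my_obj_list *list;

        /* No "initialised tail queue" check needed: the head exists once
         * the registration constructor has run. */
        list = RTE_TAILQ_CAST(my_obj_tailq.head, my_obj_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, list, next) {
                obj = te->data;
                if (strncmp(name, obj->name, sizeof(obj->name)) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }
        return obj;
}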