lpm: merge tbl24 and tbl8 structures

This diff merges the separate rte_lpm_tbl24_entry and
rte_lpm_tbl8_entry types into a single rte_lpm_tbl_entry, renaming
ext_entry to valid_group and tbl8_gindex to group_idx. It also moves
the LPM tail queue to EAL_REGISTER_TAILQ registration, replaces the
deprecated E_RTE_NO_TAILQ error code with standard errno values
(-EINVAL, -ENOSPC), and guards tbl8 walks with explicit valid_group
checks.
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 983e04b..22f2ae9 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -45,7 +45,6 @@
 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
 #include <rte_malloc.h>
 #include <rte_memzone.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_eal_memconfig.h>
 #include <rte_per_lcore.h>
 
 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
 
+static struct rte_tailq_elem rte_lpm_tailq = {
+       .name = "RTE_LPM",
+};
+EAL_REGISTER_TAILQ(rte_lpm_tailq)
+
 #define MAX_DEPTH_TBL24 24
 
 enum valid_flag {
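
The tail queue is now registered with EAL up front instead of being
looked up by index on every call. A minimal sketch of the pattern,
assuming the EAL_REGISTER_TAILQ machinery from rte_tailq.h, with a
hypothetical "RTE_FOO" library standing in for LPM:

	#include <rte_tailq.h>

	/* Sketch only: EAL fills in rte_foo_tailq.head while it
	 * initializes, so later lookups reduce to a cast instead of an
	 * indexed search that could fail with E_RTE_NO_TAILQ. */
	TAILQ_HEAD(rte_foo_list, rte_tailq_entry);

	static struct rte_tailq_elem rte_foo_tailq = {
		.name = "RTE_FOO",
	};
	EAL_REGISTER_TAILQ(rte_foo_tailq)

This is why the lookup-and-check blocks in the hunks below collapse to
a single RTE_TAILQ_CAST.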
@@ -109,7 +113,7 @@ depth_to_range(uint8_t depth)
                return 1 << (MAX_DEPTH_TBL24 - depth);
 
        /* Else if depth is greater than 24 */
-       return (1 << (RTE_LPM_MAX_DEPTH - depth));
+       return 1 << (RTE_LPM_MAX_DEPTH - depth);
 }
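
For reference, a worked example of the ranges this helper computes
(this diff only drops the redundant parentheses; RTE_LPM_MAX_DEPTH is
32 for IPv4):

	/* depth <= 24: number of tbl24 entries the prefix spans.
	 *   depth 16 -> 1 << (24 - 16) = 256 tbl24 entries
	 *   depth 24 -> 1 << 0         =   1 tbl24 entry
	 * depth > 24: number of tbl8 entries within one group.
	 *   depth 25 -> 1 << (32 - 25) = 128 tbl8 entries
	 *   depth 32 -> 1 << 0         =   1 tbl8 entry
	 */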
 
 /*
@@ -122,12 +126,7 @@ rte_lpm_find_existing(const char *name)
        struct rte_tailq_entry *te;
        struct rte_lpm_list *lpm_list;
 
-       /* check that we have an initialised tail queue */
-       if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
-                       rte_lpm_list)) == NULL) {
-               rte_errno = E_RTE_NO_TAILQ;
-               return NULL;
-       }
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, lpm_list, next) {
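
RTE_TAILQ_CAST just reinterprets the head that EAL attached to the
registered element as the library's typed list. Roughly (a sketch, not
the verbatim macro definition):

	/* Assuming rte_lpm_tailq.head was populated by EAL at init. */
	struct rte_lpm_list *lpm_list = (struct rte_lpm_list *)
			&rte_lpm_tailq.head->tailq_head;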
@@ -158,15 +157,9 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
        uint32_t mem_size;
        struct rte_lpm_list *lpm_list;
 
-       /* check that we have an initialised tail queue */
-       if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
-                       rte_lpm_list)) == NULL) {
-               rte_errno = E_RTE_NO_TAILQ;
-               return NULL;
-       }
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
-       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
-       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
+       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 2);
 
        /* Check user arguments. */
        if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
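
The matching header change is not visible in this file; presumably the
two entry types collapse into one 2-byte bitfield along these lines,
with the union member and the meaning of valid_group depending on
whether the entry lives in tbl24 or a tbl8 (a sketch inferred from the
field names used throughout this diff, not the verbatim rte_lpm.h
definition):

	#include <stdint.h>

	struct rte_lpm_tbl_entry {
		union {
			uint8_t next_hop;  /* tbl8 entry, or non-extended
					    * tbl24 entry */
			uint8_t group_idx; /* extended tbl24 entry: index
					    * of the owning tbl8 group */
		};
		uint8_t valid       :1; /* entry holds a route */
		uint8_t valid_group :1; /* tbl24: extended into a tbl8;
					 * tbl8: group is allocated */
		uint8_t depth       :6; /* prefix length of the rule */
	};

The RTE_BUILD_BUG_ON above pins this to exactly two bytes, preserving
the old per-entry footprint.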
@@ -233,12 +226,7 @@ rte_lpm_free(struct rte_lpm *lpm)
        if (lpm == NULL)
                return;
 
-       /* check that we have an initialised tail queue */
-       if ((lpm_list =
-            RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
-               rte_errno = E_RTE_NO_TAILQ;
-               return;
-       }
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
 
@@ -298,6 +286,9 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                                return rule_index;
                        }
                }
+
+               if (rule_index == lpm->max_rules)
+                       return -ENOSPC;
        } else {
                /* Calculate the position in which the rule will be stored. */
                rule_index = 0;
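
The added check makes rule_add() fail fast with -ENOSPC when the scan
runs off the end of a full rules table. A hypothetical caller-side
check, assuming rte_lpm_add() propagates rule_add()'s negative return
values (lpm, ip and next_hop in scope):

	int ret = rte_lpm_add(lpm, ip, 24, next_hop);
	if (ret == -ENOSPC)
		RTE_LOG(ERR, LPM, "rules table full (max_rules reached)\n");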
@@ -379,26 +370,26 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
        for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
                /* If rule is found return the rule index. */
                if (lpm->rules_tbl[rule_index].ip == ip_masked)
-                       return (rule_index);
+                       return rule_index;
        }
 
-       /* If rule is not found return -E_RTE_NO_TAILQ. */
-       return -E_RTE_NO_TAILQ;
+       /* If rule is not found return -EINVAL. */
+       return -EINVAL;
 }
 
 /*
  * Find, clean and allocate a tbl8.
  */
 static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
+tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
 {
-       uint32_t tbl8_gindex; /* tbl8 group index. */
-       struct rte_lpm_tbl8_entry *tbl8_entry;
+       uint32_t group_idx; /* tbl8 group index. */
+       struct rte_lpm_tbl_entry *tbl8_entry;
 
        /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
-       for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
-                       tbl8_gindex++) {
-               tbl8_entry = &tbl8[tbl8_gindex *
+       for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
+                       group_idx++) {
+               tbl8_entry = &tbl8[group_idx *
                                   RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
                /* If a free tbl8 group is found clean it and set as VALID. */
                if (!tbl8_entry->valid_group) {
@@ -409,7 +400,7 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
                        tbl8_entry->valid_group = VALID;
 
                        /* Return group index for allocated tbl8 group. */
-                       return tbl8_gindex;
+                       return group_idx;
                }
        }
 
@@ -418,7 +409,7 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
 }
 
 static inline void
-tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
        /* Set tbl8 group invalid*/
        tbl8[tbl8_group_start].valid_group = INVALID;
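
With the merged structure, valid_group does double duty, which is why
tbl8_alloc() and tbl8_free() can keep using one field name for both
tables (a summary of what the diff itself encodes):

	/* tbl24 entry: valid_group == 1 -> entry is extended; the union
	 *              holds group_idx (owning tbl8 group).
	 *              valid_group == 0 -> the union holds next_hop.
	 *
	 * tbl8 entry:  valid_group marks the whole group as allocated;
	 *              tbl8_alloc() claims a group whose first entry has
	 *              valid_group == INVALID, tbl8_free() clears it.
	 */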
@@ -439,46 +430,52 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                 * For invalid OR valid and non-extended tbl 24 entries set
                 * entry.
                 */
-               if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+               if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                                lpm->tbl24[i].depth <= depth)) {
 
-                       struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                       struct rte_lpm_tbl_entry new_tbl24_entry = {
                                { .next_hop = next_hop, },
                                .valid = VALID,
-                               .ext_entry = 0,
+                               .valid_group = 0,
                                .depth = depth,
                        };
 
                        /* Setting tbl24 entry in one go to avoid race
-                        * conditions */
+                        * conditions
+                        */
                        lpm->tbl24[i] = new_tbl24_entry;
 
                        continue;
                }
 
-               /* If tbl24 entry is valid and extended calculate the index
-                * into tbl8. */
-               tbl8_index = lpm->tbl24[i].tbl8_gindex *
-                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-               tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
-               for (j = tbl8_index; j < tbl8_group_end; j++) {
-                       if (!lpm->tbl8[j].valid ||
-                                       lpm->tbl8[j].depth <= depth) {
-                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
-                                       .valid = VALID,
-                                       .valid_group = VALID,
-                                       .depth = depth,
-                                       .next_hop = next_hop,
-                               };
-
-                               /*
-                                * Setting tbl8 entry in one go to avoid race
-                                * conditions
-                                */
-                               lpm->tbl8[j] = new_tbl8_entry;
-
-                               continue;
+               if (lpm->tbl24[i].valid_group == 1) {
+                       /* If tbl24 entry is valid and extended, calculate
+                        * the index into tbl8.
+                        */
+                       tbl8_index = lpm->tbl24[i].group_idx *
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                       tbl8_group_end = tbl8_index +
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                       for (j = tbl8_index; j < tbl8_group_end; j++) {
+                               if (!lpm->tbl8[j].valid ||
+                                               lpm->tbl8[j].depth <= depth) {
+                                       struct rte_lpm_tbl_entry
+                                               new_tbl8_entry = {
+                                               .valid = VALID,
+                                               .valid_group = VALID,
+                                               .depth = depth,
+                                               .next_hop = next_hop,
+                                       };
+
+                                       /*
+                                        * Setting tbl8 entry in one go to avoid
+                                        * race conditions
+                                        */
+                                       lpm->tbl8[j] = new_tbl8_entry;
+
+                                       continue;
+                               }
                        }
                }
        }
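
The rewritten loop also guards the tbl8 walk with an explicit
valid_group check: with the merged entry type, reading group_idx out
of a non-extended entry would silently reinterpret its next_hop. For
reference, a sketch of the two-level index arithmetic used here
(RTE_LPM_TBL8_GROUP_NUM_ENTRIES is 256; lpm and ip in scope):

	uint32_t tbl24_index = ip >> 8;          /* top 24 bits */
	if (lpm->tbl24[tbl24_index].valid_group == 1) {
		uint32_t tbl8_index =
			lpm->tbl24[tbl24_index].group_idx *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES + /* group base */
			(ip & 0xFF);                     /* bottom 8 bits */
		/* lpm->tbl8[tbl8_index] holds the depth 25..32 entry. */
	}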
@@ -524,17 +521,17 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                 * so assign whole structure in one go
                 */
 
-               struct rte_lpm_tbl24_entry new_tbl24_entry = {
-                       { .tbl8_gindex = (uint8_t)tbl8_group_index, },
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
+                       { .group_idx = (uint8_t)tbl8_group_index, },
                        .valid = VALID,
-                       .ext_entry = 1,
+                       .valid_group = 1,
                        .depth = 0,
                };
 
                lpm->tbl24[tbl24_index] = new_tbl24_entry;
 
        }/* If valid entry but not extended calculate the index into Table8. */
-       else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+       else if (lpm->tbl24[tbl24_index].valid_group == 0) {
                /* Search for free tbl8 group. */
                tbl8_group_index = tbl8_alloc(lpm->tbl8);
 
@@ -575,10 +572,10 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                 * so assign whole structure in one go.
                 */
 
-               struct rte_lpm_tbl24_entry new_tbl24_entry = {
-                               { .tbl8_gindex = (uint8_t)tbl8_group_index, },
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
+                               { .group_idx = (uint8_t)tbl8_group_index, },
                                .valid = VALID,
-                               .ext_entry = 1,
+                               .valid_group = 1,
                                .depth = 0,
                };
 
@@ -588,7 +585,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        else { /*
                * If it is valid, extended entry calculate the index into tbl8.
                */
-               tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+               tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
@@ -597,7 +594,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 
                        if (!lpm->tbl8[i].valid ||
                                        lpm->tbl8[i].depth <= depth) {
-                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                               struct rte_lpm_tbl_entry new_tbl8_entry = {
                                        .valid = VALID,
                                        .depth = depth,
                                        .next_hop = next_hop,
@@ -733,18 +730,17 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
                 */
                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
 
-                       if (lpm->tbl24[i].ext_entry == 0 &&
+                       if (lpm->tbl24[i].valid_group == 0 &&
                                        lpm->tbl24[i].depth <= depth ) {
                                lpm->tbl24[i].valid = INVALID;
-                       }
-                       else {
+                       } else if (lpm->tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
                                 * to be a rule with depth >= 25 in the
                                 * associated TBL8 group.
                                 */
 
-                               tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+                               tbl8_group_index = lpm->tbl24[i].group_idx;
                                tbl8_index = tbl8_group_index *
                                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 
@@ -763,15 +759,16 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
                 * associated with this rule.
                 */
 
-               struct rte_lpm_tbl24_entry new_tbl24_entry = {
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
                        {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
                        .valid = VALID,
-                       .ext_entry = 0,
+                       .valid_group = 0,
                        .depth = sub_rule_depth,
                };
 
-               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+               struct rte_lpm_tbl_entry new_tbl8_entry = {
                        .valid = VALID,
+                       .valid_group = VALID,
                        .depth = sub_rule_depth,
                        .next_hop = lpm->rules_tbl
                        [sub_rule_index].next_hop,
@@ -779,18 +776,17 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 
                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
 
-                       if (lpm->tbl24[i].ext_entry == 0 &&
+                       if (lpm->tbl24[i].valid_group == 0 &&
                                        lpm->tbl24[i].depth <= depth ) {
                                lpm->tbl24[i] = new_tbl24_entry;
-                       }
-                       else {
+                       } else if (lpm->tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
                                 * to be a rule with depth >= 25 in the
                                 * associated TBL8 group.
                                 */
 
-                               tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+                               tbl8_group_index = lpm->tbl24[i].group_idx;
                                tbl8_index = tbl8_group_index *
                                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 
@@ -816,7 +812,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
  * thus can be recycled
  */
 static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
        uint32_t tbl8_group_end, i;
        tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -876,7 +872,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
        tbl24_index = ip_masked >> 8;
 
        /* Calculate the index into tbl8 and range. */
-       tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+       tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
        tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
        tbl8_range = depth_to_range(depth);
@@ -893,7 +889,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
        }
        else {
                /* Set new tbl8 entry. */
-               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+               struct rte_lpm_tbl_entry new_tbl8_entry = {
                        .valid = VALID,
                        .depth = sub_rule_depth,
                        .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
@@ -925,10 +921,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
        }
        else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
-               struct rte_lpm_tbl24_entry new_tbl24_entry = {
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
                        { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
                        .valid = VALID,
-                       .ext_entry = 0,
+                       .valid_group = 0,
                        .depth = lpm->tbl8[tbl8_recycle_index].depth,
                };
 
@@ -967,10 +963,10 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 
        /*
         * Check if rule_to_delete_index was found. If no rule was found the
-        * function rule_find returns -E_RTE_NO_TAILQ.
+        * function rule_find returns -EINVAL.
         */
        if (rule_to_delete_index < 0)
-               return -E_RTE_NO_TAILQ;
+               return -EINVAL;
 
        /* Delete the rule from the rule table. */
        rule_delete(lpm, rule_to_delete_index, depth);
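
With E_RTE_NO_TAILQ gone, callers test against standard errno values
instead. A minimal hypothetical caller:

	int ret = rte_lpm_delete(lpm, ip, 24);
	if (ret == -EINVAL)
		RTE_LOG(INFO, LPM, "no rule found for this prefix\n");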
@@ -1014,4 +1010,3 @@ rte_lpm_delete_all(struct rte_lpm *lpm)
        /* Delete all rules form the rules table. */
        memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
-