update copyright date to 2013
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index f1c9892..95e1546 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
  *   All rights reserved.
  * 
  *   Redistribution and use in source and binary forms, with or without 
@@ -52,6 +52,8 @@
 #include <rte_per_lcore.h>
 #include <rte_string_fns.h>
 #include <rte_errno.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
 
 #include "rte_lpm.h"
 
@@ -126,10 +128,12 @@ rte_lpm_find_existing(const char *name)
                return NULL;
        }
 
+       rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(l, lpm_list, next) {
                if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
+       rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
 
        if (l == NULL)
                rte_errno = ENOENT;
@@ -179,20 +183,22 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
        /* Determine the amount of memory to allocate. */
        mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
 
+       rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
        /* guarantee there's no existing */
        TAILQ_FOREACH(lpm, lpm_list, next) {
                if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        if (lpm != NULL)
-               return NULL;
+               goto exit;
 
        /* Allocate memory to store the LPM data structures. */
        lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
                        CACHE_LINE_SIZE, socket_id);
        if (lpm == NULL) {
                RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
-               return NULL;
+               goto exit;
        }
 
        /* Save user arguments. */
@@ -201,6 +207,9 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 
        TAILQ_INSERT_TAIL(lpm_list, lpm, next);
 
+exit:
+       rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
        return lpm;
 }
 
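[Note] The two locking hunks above close a classic check-then-act race on the shared lpm_list tailq: lookups take the EAL tailq lock for reading, and rte_lpm_create now takes it for writing *before* its duplicate-name scan, so the scan and the TAILQ_INSERT_TAIL are atomic with respect to other creators. A behavioural side effect worth noting: on a name collision the function now falls through to exit with lpm pointing at the existing table and returns it, where the old code returned NULL. Below is a minimal stand-alone sketch of the same discipline, with POSIX rwlocks and a plain singly-linked list standing in for rte_rwlock and the EAL tailq; the obj_* names are illustrative, not DPDK API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj {
	char name[32];
	struct obj *next;
};

static struct obj *obj_list;
static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Lookup side: the read lock is enough (cf. rte_lpm_find_existing). */
static struct obj *
obj_find(const char *name)
{
	struct obj *o;

	pthread_rwlock_rdlock(&obj_lock);
	for (o = obj_list; o != NULL; o = o->next)
		if (strncmp(name, o->name, sizeof(o->name)) == 0)
			break;
	pthread_rwlock_unlock(&obj_lock);
	return o;
}

/* Create side: the write lock must cover BOTH the duplicate scan and
 * the insertion; locking only the insert would leave exactly the
 * check-then-act race this patch is closing. */
static struct obj *
obj_create(const char *name)
{
	struct obj *o;

	pthread_rwlock_wrlock(&obj_lock);
	for (o = obj_list; o != NULL; o = o->next)
		if (strncmp(name, o->name, sizeof(o->name)) == 0)
			goto exit;	/* name taken: return the existing one */

	o = calloc(1, sizeof(*o));
	if (o == NULL)
		goto exit;	/* allocation failed: return NULL */
	snprintf(o->name, sizeof(o->name), "%s", name);
	o->next = obj_list;
	obj_list = o;
exit:
	pthread_rwlock_unlock(&obj_lock);
	return o;
}
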
@@ -244,7 +253,7 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
                
        /* Scan through rule group to see if rule already exists. */
-       for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+       for (; rule_index < last_rule; rule_index++) {
 
                /* If rule already exists update its next_hop and return. */
                if (lpm->rules_tbl[rule_index].ip == ip_masked) {
@@ -376,10 +385,10 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                                lpm->tbl24[i].depth <= depth)) {
 
                        struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                               { .next_hop = next_hop, },
                                .valid = VALID,
                                .ext_entry = 0,
                                .depth = depth,
-                               { .next_hop = next_hop, }
                        };
 
                        /* Setting tbl24 entry in one go to avoid race
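[Note] This hunk and all the remaining ones make the same mechanical change: the brace-enclosed initializer for the entry's anonymous union moves to the front of the list. The brace carries no designator for the union itself, so it is a positional initializer and binds to whichever member follows the last one initialized. Listed after .depth (the struct's final member) it has no member left to bind to, which stricter compilers reject even where GCC was lenient; listed first it binds to the anonymous union, which is the first member of struct rte_lpm_tbl24_entry in the rte_lpm.h of this period. A stand-alone sketch of the layout and the accepted ordering (the struct here approximates the real header):

#include <stdint.h>

/* Approximation of struct rte_lpm_tbl24_entry: the anonymous union is
 * the FIRST member, followed by three bit-fields in the second byte. */
struct tbl24_entry {
	union {
		uint8_t next_hop;	/* when ext_entry == 0 */
		uint8_t tbl8_gindex;	/* when ext_entry == 1 */
	};
	uint8_t valid     :1;
	uint8_t ext_entry :1;
	uint8_t depth     :6;
};

/* Initializers in declaration order: union first, then the bit-fields.
 * This is the form the hunks convert to. */
static const struct tbl24_entry entry = {
	{ .next_hop = 5, },
	.valid = 1,
	.ext_entry = 0,
	.depth = 24,
};
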
@@ -458,10 +467,10 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                 */
 
                struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                       { .tbl8_gindex = (uint8_t)tbl8_group_index, },
                        .valid = VALID,
                        .ext_entry = 1,
                        .depth = 0,
-                       { .tbl8_gindex = (uint8_t)tbl8_group_index, }
                };
 
                lpm->tbl24[tbl24_index] = new_tbl24_entry;
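[Note] The assignment above is also why each entry is built in a local variable first: lpm->tbl24[tbl24_index] is updated with a single whole-struct store rather than field by field, so a lock-free reader never observes a half-written entry (e.g. valid already set while next_hop is still stale). Strictly speaking the C standard does not make a 2-byte struct assignment atomic; the "in one go" comments rely on the compiler emitting one 16-bit store for it. A sketch of the racy and the safe variants, reusing the tbl24_entry layout from the sketch above:

#include <stdint.h>

struct tbl24_entry {	/* same 2-byte layout as sketched earlier */
	union { uint8_t next_hop; uint8_t tbl8_gindex; };
	uint8_t valid :1, ext_entry :1, depth :6;
};

/* Field-by-field update: a concurrent lookup running between the
 * stores can see valid==1 combined with a stale depth/next_hop. */
void
entry_update_racy(struct tbl24_entry *slot, uint8_t hop, uint8_t new_depth)
{
	slot->next_hop = hop;		/* store #1 */
	slot->valid = 1;		/* store #2: entry looks live... */
	slot->depth = new_depth;	/* ...before depth is written */
}

/* Whole-entry update, as the patched code does: build the entry
 * locally, then assign the struct in one go. */
void
entry_update_one_go(struct tbl24_entry *slot, uint8_t hop, uint8_t new_depth)
{
	struct tbl24_entry e = {
		{ .next_hop = hop, },
		.valid = 1,
		.ext_entry = 0,
		.depth = new_depth,
	};
	*slot = e;			/* single whole-struct store */
}
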
@@ -509,10 +518,10 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                 */
 
                struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                               { .tbl8_gindex = (uint8_t)tbl8_group_index, },
                                .valid = VALID,
                                .ext_entry = 1,
                                .depth = 0,
-                               { .tbl8_gindex = (uint8_t)tbl8_group_index, }
                };
 
                lpm->tbl24[tbl24_index] = new_tbl24_entry;
@@ -670,10 +679,10 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
                                lpm->max_rules_per_depth);
 
                struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                       {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
                        .valid = VALID,
                        .ext_entry = 0,
                        .depth = new_depth,
-                       {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}
                };
 
                struct rte_lpm_tbl8_entry new_tbl8_entry = {
@@ -835,10 +844,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
        else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
                struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                       { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
                        .valid = VALID,
                        .ext_entry = 0,
                        .depth = lpm->tbl8[tbl8_recycle_index].depth,
-                       { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }
                };
 
                /* Set tbl24 before freeing tbl8 to avoid race condition. */