4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 #include <sys/queue.h>
43 #include <rte_branch_prediction.h>
44 #include <rte_common.h>
45 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
46 #include <rte_malloc.h>
47 #include <rte_memzone.h>
49 #include <rte_eal_memconfig.h>
50 #include <rte_per_lcore.h>
51 #include <rte_string_fns.h>
52 #include <rte_errno.h>
53 #include <rte_rwlock.h>
54 #include <rte_spinlock.h>
58 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
60 static struct rte_tailq_elem rte_lpm_tailq = {
63 EAL_REGISTER_TAILQ(rte_lpm_tailq)
#define MAX_DEPTH_TBL24 24

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)	: range = 1 - 32
 * mask   (OUT)	: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's.
	 */
	return (int)0x80000000 >> (depth - 1);
}
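/*
 * Worked examples (illustrative; assumes the arithmetic right shift of a
 * negative int that the code above already relies on):
 *   depth_to_mask(1)  == 0x80000000
 *   depth_to_mask(8)  == 0xFF000000
 *   depth_to_mask(24) == 0xFFFFFF00
 *   depth_to_mask(32) == 0xFFFFFFFF
 */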
/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
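/*
 * Worked examples (illustrative): a /16 covers 1 << (24 - 16) = 256 tbl24
 * entries, a /24 covers exactly one tbl24 entry, and a /30 covers
 * 1 << (32 - 30) = 4 entries within a tbl8 group.
 */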
120 * Find an existing lpm table and return a pointer to it.
123 rte_lpm_find_existing_v20(const char *name)
125 struct rte_lpm_v20 *l = NULL;
126 struct rte_tailq_entry *te;
127 struct rte_lpm_list *lpm_list;
129 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
131 rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
132 TAILQ_FOREACH(te, lpm_list, next) {
133 l = (struct rte_lpm_v20 *) te->data;
134 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
137 rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
146 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
149 rte_lpm_find_existing_v1604(const char *name)
151 struct rte_lpm *l = NULL;
152 struct rte_tailq_entry *te;
153 struct rte_lpm_list *lpm_list;
155 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
157 rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
158 TAILQ_FOREACH(te, lpm_list, next) {
159 l = (struct rte_lpm *) te->data;
160 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
163 rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
172 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
173 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
174 rte_lpm_find_existing_v1604);
177 * Allocates memory for LPM object
180 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
181 __rte_unused int flags)
183 char mem_name[RTE_LPM_NAMESIZE];
184 struct rte_lpm_v20 *lpm = NULL;
185 struct rte_tailq_entry *te;
187 struct rte_lpm_list *lpm_list;
189 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
191 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
193 /* Check user arguments. */
194 if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
199 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
201 /* Determine the amount of memory to allocate. */
202 mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
204 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	/* Guarantee there is no existing entry with the same name. */
207 TAILQ_FOREACH(te, lpm_list, next) {
208 lpm = (struct rte_lpm_v20 *) te->data;
209 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
215 /* allocate tailq entry */
216 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
218 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
222 /* Allocate memory to store the LPM data structures. */
223 lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
224 RTE_CACHE_LINE_SIZE, socket_id);
226 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
231 /* Save user arguments. */
232 lpm->max_rules = max_rules;
233 snprintf(lpm->name, sizeof(lpm->name), "%s", name);
235 te->data = (void *) lpm;
237 TAILQ_INSERT_TAIL(lpm_list, te, next);
240 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
244 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
247 rte_lpm_create_v1604(const char *name, int socket_id,
248 const struct rte_lpm_config *config)
250 char mem_name[RTE_LPM_NAMESIZE];
251 struct rte_lpm *lpm = NULL;
252 struct rte_tailq_entry *te;
253 uint32_t mem_size, rules_size, tbl8s_size;
254 struct rte_lpm_list *lpm_list;
256 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
258 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
260 /* Check user arguments. */
261 if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
262 || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
267 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
269 /* Determine the amount of memory to allocate. */
270 mem_size = sizeof(*lpm);
271 rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
272 tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
273 RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
275 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	/* Guarantee there is no existing entry with the same name. */
278 TAILQ_FOREACH(te, lpm_list, next) {
279 lpm = (struct rte_lpm *) te->data;
280 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
286 /* allocate tailq entry */
287 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
289 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
293 /* Allocate memory to store the LPM data structures. */
294 lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
295 RTE_CACHE_LINE_SIZE, socket_id);
297 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
302 lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
303 (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
305 if (lpm->rules_tbl == NULL) {
306 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
313 lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
314 (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
316 if (lpm->tbl8 == NULL) {
317 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
324 /* Save user arguments. */
325 lpm->max_rules = config->max_rules;
326 lpm->number_tbl8s = config->number_tbl8s;
327 snprintf(lpm->name, sizeof(lpm->name), "%s", name);
329 te->data = (void *) lpm;
331 TAILQ_INSERT_TAIL(lpm_list, te, next);
334 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
338 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
340 struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
341 const struct rte_lpm_config *config), rte_lpm_create_v1604);
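/*
 * Illustrative use of the 16.04 create API (a sketch, not part of the
 * original file; configuration values are arbitrary):
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example", SOCKET_ID_ANY, &config);
 *
 *	if (lpm == NULL)
 *		rte_exit(EXIT_FAILURE, "LPM creation failed\n");
 */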
344 * Deallocates memory for given LPM table.
347 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
349 struct rte_lpm_list *lpm_list;
350 struct rte_tailq_entry *te;
352 /* Check user arguments. */
356 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
358 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
360 /* find our tailq entry */
361 TAILQ_FOREACH(te, lpm_list, next) {
362 if (te->data == (void *) lpm)
366 TAILQ_REMOVE(lpm_list, te, next);
368 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
370 rte_free(lpm->rules_tbl);
374 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
377 rte_lpm_free_v1604(struct rte_lpm *lpm)
379 struct rte_lpm_list *lpm_list;
380 struct rte_tailq_entry *te;
382 /* Check user arguments. */
386 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
388 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
390 /* find our tailq entry */
391 TAILQ_FOREACH(te, lpm_list, next) {
392 if (te->data == (void *) lpm)
396 TAILQ_REMOVE(lpm_list, te, next);
398 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
400 rte_free(lpm->rules_tbl);
404 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
405 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
 * as the group index because, although the depth range is 1 - 32, the groups
 * are indexed from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
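/*
 * Illustrative layout: with three /8 rules and two /16 rules installed,
 * rule_info[7] (depth 8) describes rules_tbl[0..2] and rule_info[15]
 * (depth 16) describes rules_tbl[3..4]; first_rule holds the start index of
 * each group and used_rules its length.
 */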
418 static inline int32_t
419 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
422 uint32_t rule_gindex, rule_index, last_rule;
427 /* Scan through rule group to see if rule already exists. */
428 if (lpm->rule_info[depth - 1].used_rules > 0) {
430 /* rule_gindex stands for rule group index. */
431 rule_gindex = lpm->rule_info[depth - 1].first_rule;
432 /* Initialise rule_index to point to start of rule group. */
433 rule_index = rule_gindex;
434 /* Last rule = Last used rule in this rule group. */
435 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
437 for (; rule_index < last_rule; rule_index++) {
439 /* If rule already exists update its next_hop and return. */
440 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
441 lpm->rules_tbl[rule_index].next_hop = next_hop;
447 if (rule_index == lpm->max_rules)
450 /* Calculate the position in which the rule will be stored. */
453 for (i = depth - 1; i > 0; i--) {
454 if (lpm->rule_info[i - 1].used_rules > 0) {
455 rule_index = lpm->rule_info[i - 1].first_rule
456 + lpm->rule_info[i - 1].used_rules;
460 if (rule_index == lpm->max_rules)
463 lpm->rule_info[depth - 1].first_rule = rule_index;
466 /* Make room for the new rule in the array. */
467 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
468 if (lpm->rule_info[i - 1].first_rule
469 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
472 if (lpm->rule_info[i - 1].used_rules > 0) {
473 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
474 + lpm->rule_info[i - 1].used_rules]
475 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
476 lpm->rule_info[i - 1].first_rule++;
480 /* Add the new rule. */
481 lpm->rules_tbl[rule_index].ip = ip_masked;
482 lpm->rules_tbl[rule_index].next_hop = next_hop;
484 /* Increment the used rules counter for this rule group. */
485 lpm->rule_info[depth - 1].used_rules++;
490 static inline int32_t
491 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
494 uint32_t rule_gindex, rule_index, last_rule;
499 /* Scan through rule group to see if rule already exists. */
500 if (lpm->rule_info[depth - 1].used_rules > 0) {
502 /* rule_gindex stands for rule group index. */
503 rule_gindex = lpm->rule_info[depth - 1].first_rule;
504 /* Initialise rule_index to point to start of rule group. */
505 rule_index = rule_gindex;
506 /* Last rule = Last used rule in this rule group. */
507 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
509 for (; rule_index < last_rule; rule_index++) {
511 /* If rule already exists update its next_hop and return. */
512 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
513 lpm->rules_tbl[rule_index].next_hop = next_hop;
519 if (rule_index == lpm->max_rules)
522 /* Calculate the position in which the rule will be stored. */
525 for (i = depth - 1; i > 0; i--) {
526 if (lpm->rule_info[i - 1].used_rules > 0) {
527 rule_index = lpm->rule_info[i - 1].first_rule
528 + lpm->rule_info[i - 1].used_rules;
532 if (rule_index == lpm->max_rules)
535 lpm->rule_info[depth - 1].first_rule = rule_index;
538 /* Make room for the new rule in the array. */
539 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
540 if (lpm->rule_info[i - 1].first_rule
541 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
544 if (lpm->rule_info[i - 1].used_rules > 0) {
545 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
546 + lpm->rule_info[i - 1].used_rules]
547 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
548 lpm->rule_info[i - 1].first_rule++;
552 /* Add the new rule. */
553 lpm->rules_tbl[rule_index].ip = ip_masked;
554 lpm->rules_tbl[rule_index].next_hop = next_hop;
556 /* Increment the used rules counter for this rule group. */
557 lpm->rule_info[depth - 1].used_rules++;
563 * Delete a rule from the rule table.
564 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
567 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
573 lpm->rules_tbl[rule_index] =
574 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
575 + lpm->rule_info[depth - 1].used_rules - 1];
577 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
578 if (lpm->rule_info[i].used_rules > 0) {
579 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
580 lpm->rules_tbl[lpm->rule_info[i].first_rule
581 + lpm->rule_info[i].used_rules - 1];
582 lpm->rule_info[i].first_rule--;
586 lpm->rule_info[depth - 1].used_rules--;
590 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
596 lpm->rules_tbl[rule_index] =
597 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
598 + lpm->rule_info[depth - 1].used_rules - 1];
600 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
601 if (lpm->rule_info[i].used_rules > 0) {
602 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
603 lpm->rules_tbl[lpm->rule_info[i].first_rule
604 + lpm->rule_info[i].used_rules - 1];
605 lpm->rule_info[i].first_rule--;
609 lpm->rule_info[depth - 1].used_rules--;
613 * Finds a rule in rule table.
614 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
616 static inline int32_t
617 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
619 uint32_t rule_gindex, last_rule, rule_index;
623 rule_gindex = lpm->rule_info[depth - 1].first_rule;
624 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
626 /* Scan used rules at given depth to find rule. */
627 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
628 /* If rule is found return the rule index. */
629 if (lpm->rules_tbl[rule_index].ip == ip_masked)
633 /* If rule is not found return -EINVAL. */
637 static inline int32_t
638 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
640 uint32_t rule_gindex, last_rule, rule_index;
644 rule_gindex = lpm->rule_info[depth - 1].first_rule;
645 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
647 /* Scan used rules at given depth to find rule. */
648 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
649 /* If rule is found return the rule index. */
650 if (lpm->rules_tbl[rule_index].ip == ip_masked)
654 /* If rule is not found return -EINVAL. */
659 * Find, clean and allocate a tbl8.
661 static inline int32_t
662 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
664 uint32_t group_idx; /* tbl8 group index. */
665 struct rte_lpm_tbl_entry_v20 *tbl8_entry;
667 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
668 for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
670 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
671 /* If a free tbl8 group is found clean it and set as VALID. */
672 if (!tbl8_entry->valid_group) {
673 memset(&tbl8_entry[0], 0,
674 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
675 sizeof(tbl8_entry[0]));
677 tbl8_entry->valid_group = VALID;
679 /* Return group index for allocated tbl8 group. */
684 /* If there are no tbl8 groups free then return error. */
688 static inline int32_t
689 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
691 uint32_t group_idx; /* tbl8 group index. */
692 struct rte_lpm_tbl_entry *tbl8_entry;
694 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
695 for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
696 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
697 /* If a free tbl8 group is found clean it and set as VALID. */
698 if (!tbl8_entry->valid_group) {
699 memset(&tbl8_entry[0], 0,
700 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
701 sizeof(tbl8_entry[0]));
703 tbl8_entry->valid_group = VALID;
705 /* Return group index for allocated tbl8 group. */
710 /* If there are no tbl8 groups free then return error. */
715 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
	/* Set tbl8 group invalid. */
718 tbl8[tbl8_group_start].valid_group = INVALID;
722 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
	/* Set tbl8 group invalid. */
725 tbl8[tbl8_group_start].valid_group = INVALID;
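/*
 * Note on tbl8 addressing (illustrative): group g occupies entries
 * [g * RTE_LPM_TBL8_GROUP_NUM_ENTRIES, (g + 1) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
 * of the flat tbl8 array. tbl8_free_*() only clears valid_group on the first
 * entry of a group; the remaining entries are wiped by the next
 * tbl8_alloc_*() call that reuses the group.
 */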
728 static inline int32_t
729 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
732 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
734 /* Calculate the index into Table24. */
735 tbl24_index = ip >> 8;
736 tbl24_range = depth_to_range(depth);
738 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set the
		 * entry.
		 */
743 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
744 lpm->tbl24[i].depth <= depth)) {
746 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
747 { .next_hop = next_hop, },
			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
756 lpm->tbl24[i] = new_tbl24_entry;
761 if (lpm->tbl24[i].valid_group == 1) {
762 /* If tbl24 entry is valid and extended calculate the
765 tbl8_index = lpm->tbl24[i].group_idx *
766 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
767 tbl8_group_end = tbl8_index +
768 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
770 for (j = tbl8_index; j < tbl8_group_end; j++) {
771 if (!lpm->tbl8[j].valid ||
772 lpm->tbl8[j].depth <= depth) {
773 struct rte_lpm_tbl_entry_v20
776 .valid_group = VALID,
778 .next_hop = next_hop,
				/* Setting tbl8 entry in one go to avoid
				 * race conditions.
				 */
785 lpm->tbl8[j] = new_tbl8_entry;
796 static inline int32_t
797 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
800 #define group_idx next_hop
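	/*
	 * Note: in the v1604 layout a tbl24 entry holds either a next hop or a
	 * tbl8 group index in the same field, depending on valid_group. The
	 * local "#define group_idx next_hop" above only gives that field a
	 * more descriptive name in the extended case.
	 */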
801 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
803 /* Calculate the index into Table24. */
804 tbl24_index = ip >> 8;
805 tbl24_range = depth_to_range(depth);
807 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set the
		 * entry.
		 */
812 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
813 lpm->tbl24[i].depth <= depth)) {
815 struct rte_lpm_tbl_entry new_tbl24_entry = {
816 .next_hop = next_hop,
			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
825 lpm->tbl24[i] = new_tbl24_entry;
830 if (lpm->tbl24[i].valid_group == 1) {
831 /* If tbl24 entry is valid and extended calculate the
834 tbl8_index = lpm->tbl24[i].group_idx *
835 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
836 tbl8_group_end = tbl8_index +
837 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
839 for (j = tbl8_index; j < tbl8_group_end; j++) {
840 if (!lpm->tbl8[j].valid ||
841 lpm->tbl8[j].depth <= depth) {
842 struct rte_lpm_tbl_entry
845 .valid_group = VALID,
847 .next_hop = next_hop,
				/* Setting tbl8 entry in one go to avoid
				 * race conditions.
				 */
854 lpm->tbl8[j] = new_tbl8_entry;
865 static inline int32_t
866 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
869 uint32_t tbl24_index;
870 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
873 tbl24_index = (ip_masked >> 8);
874 tbl8_range = depth_to_range(depth);
876 if (!lpm->tbl24[tbl24_index].valid) {
877 /* Search for a free tbl8 group. */
878 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
880 /* Check tbl8 allocation was successful. */
881 if (tbl8_group_index < 0) {
882 return tbl8_group_index;
885 /* Find index into tbl8 and range. */
886 tbl8_index = (tbl8_group_index *
887 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
890 /* Set tbl8 entry. */
891 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
892 lpm->tbl8[i].depth = depth;
893 lpm->tbl8[i].next_hop = next_hop;
894 lpm->tbl8[i].valid = VALID;
898 * Update tbl24 entry to point to new tbl8 entry. Note: The
899 * ext_flag and tbl8_index need to be updated simultaneously,
900 * so assign whole structure in one go
903 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
904 { .group_idx = (uint8_t)tbl8_group_index, },
910 lpm->tbl24[tbl24_index] = new_tbl24_entry;
912 } /* If valid entry but not extended calculate the index into Table8. */
913 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
914 /* Search for free tbl8 group. */
915 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
917 if (tbl8_group_index < 0) {
918 return tbl8_group_index;
921 tbl8_group_start = tbl8_group_index *
922 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
923 tbl8_group_end = tbl8_group_start +
924 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
926 /* Populate new tbl8 with tbl24 value. */
927 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
928 lpm->tbl8[i].valid = VALID;
929 lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
930 lpm->tbl8[i].next_hop =
931 lpm->tbl24[tbl24_index].next_hop;
934 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
936 /* Insert new rule into the tbl8 entry. */
937 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
938 if (!lpm->tbl8[i].valid ||
939 lpm->tbl8[i].depth <= depth) {
940 lpm->tbl8[i].valid = VALID;
941 lpm->tbl8[i].depth = depth;
942 lpm->tbl8[i].next_hop = next_hop;
949 * Update tbl24 entry to point to new tbl8 entry. Note: The
950 * ext_flag and tbl8_index need to be updated simultaneously,
951 * so assign whole structure in one go.
954 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
955 { .group_idx = (uint8_t)tbl8_group_index, },
961 lpm->tbl24[tbl24_index] = new_tbl24_entry;
		* If the entry is valid and extended, calculate the index into tbl8.
966 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
967 tbl8_group_start = tbl8_group_index *
968 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
969 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
971 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
973 if (!lpm->tbl8[i].valid ||
974 lpm->tbl8[i].depth <= depth) {
975 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
978 .next_hop = next_hop,
979 .valid_group = lpm->tbl8[i].valid_group,
				/* Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
986 lpm->tbl8[i] = new_tbl8_entry;
996 static inline int32_t
997 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
1000 #define group_idx next_hop
1001 uint32_t tbl24_index;
1002 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
1005 tbl24_index = (ip_masked >> 8);
1006 tbl8_range = depth_to_range(depth);
1008 if (!lpm->tbl24[tbl24_index].valid) {
1009 /* Search for a free tbl8 group. */
1010 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1012 /* Check tbl8 allocation was successful. */
1013 if (tbl8_group_index < 0) {
1014 return tbl8_group_index;
1017 /* Find index into tbl8 and range. */
1018 tbl8_index = (tbl8_group_index *
1019 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1022 /* Set tbl8 entry. */
1023 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1024 lpm->tbl8[i].depth = depth;
1025 lpm->tbl8[i].next_hop = next_hop;
1026 lpm->tbl8[i].valid = VALID;
1030 * Update tbl24 entry to point to new tbl8 entry. Note: The
1031 * ext_flag and tbl8_index need to be updated simultaneously,
1032 * so assign whole structure in one go
1035 struct rte_lpm_tbl_entry new_tbl24_entry = {
1036 .group_idx = (uint8_t)tbl8_group_index,
1042 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1044 } /* If valid entry but not extended calculate the index into Table8. */
1045 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1046 /* Search for free tbl8 group. */
1047 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1049 if (tbl8_group_index < 0) {
1050 return tbl8_group_index;
1053 tbl8_group_start = tbl8_group_index *
1054 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1055 tbl8_group_end = tbl8_group_start +
1056 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1058 /* Populate new tbl8 with tbl24 value. */
1059 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1060 lpm->tbl8[i].valid = VALID;
1061 lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
1062 lpm->tbl8[i].next_hop =
1063 lpm->tbl24[tbl24_index].next_hop;
1066 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1068 /* Insert new rule into the tbl8 entry. */
1069 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1070 if (!lpm->tbl8[i].valid ||
1071 lpm->tbl8[i].depth <= depth) {
1072 lpm->tbl8[i].valid = VALID;
1073 lpm->tbl8[i].depth = depth;
1074 lpm->tbl8[i].next_hop = next_hop;
1081 * Update tbl24 entry to point to new tbl8 entry. Note: The
1082 * ext_flag and tbl8_index need to be updated simultaneously,
1083 * so assign whole structure in one go.
1086 struct rte_lpm_tbl_entry new_tbl24_entry = {
1087 .group_idx = (uint8_t)tbl8_group_index,
1093 lpm->tbl24[tbl24_index] = new_tbl24_entry;
		* If the entry is valid and extended, calculate the index into tbl8.
1098 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1099 tbl8_group_start = tbl8_group_index *
1100 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1101 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1103 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1105 if (!lpm->tbl8[i].valid ||
1106 lpm->tbl8[i].depth <= depth) {
1107 struct rte_lpm_tbl_entry new_tbl8_entry = {
1110 .next_hop = next_hop,
1111 .valid_group = lpm->tbl8[i].valid_group,
				/* Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
1118 lpm->tbl8[i] = new_tbl8_entry;
1132 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1135 int32_t rule_index, status = 0;
1138 /* Check user arguments. */
1139 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1142 ip_masked = ip & depth_to_mask(depth);
1144 /* Add the rule to the rule table. */
1145 rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
	/* If there is no space available for the new rule, return an error. */
1148 if (rule_index < 0) {
1152 if (depth <= MAX_DEPTH_TBL24) {
1153 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
1155 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
	/*
	 * If add fails due to exhaustion of tbl8 extensions delete
	 * rule that was added to rule table.
	 */
1162 rule_delete_v20(lpm, rule_index, depth);
1170 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1173 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1176 int32_t rule_index, status = 0;
1179 /* Check user arguments. */
1180 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1183 ip_masked = ip & depth_to_mask(depth);
1185 /* Add the rule to the rule table. */
1186 rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
	/* If there is no space available for the new rule, return an error. */
1189 if (rule_index < 0) {
1193 if (depth <= MAX_DEPTH_TBL24) {
1194 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
1196 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
	/*
	 * If add fails due to exhaustion of tbl8 extensions delete
	 * rule that was added to rule table.
	 */
1203 rule_delete_v1604(lpm, rule_index, depth);
1211 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1212 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1213 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
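/*
 * Illustrative add/lookup sequence (a sketch; rte_lpm_lookup() is the static
 * inline lookup from rte_lpm.h, addresses are host-order uint32_t values):
 *
 *	uint32_t next_hop;
 *
 *	if (rte_lpm_add(lpm, 0xc0a80100, 24, 5) == 0 &&		// 192.168.1.0/24
 *			rte_lpm_lookup(lpm, 0xc0a80107, &next_hop) == 0)
 *		printf("next hop = %u\n", next_hop);		// prints 5
 */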
1216 * Look for a rule in the high-level rules table
1219 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1225 /* Check user arguments. */
1226 if ((lpm == NULL) ||
1227 (next_hop == NULL) ||
1228 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1231 /* Look for the rule using rule_find. */
1232 ip_masked = ip & depth_to_mask(depth);
1233 rule_index = rule_find_v20(lpm, ip_masked, depth);
1235 if (rule_index >= 0) {
1236 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1240 /* If rule is not found return 0. */
1243 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1246 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1252 /* Check user arguments. */
1253 if ((lpm == NULL) ||
1254 (next_hop == NULL) ||
1255 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1258 /* Look for the rule using rule_find. */
1259 ip_masked = ip & depth_to_mask(depth);
1260 rule_index = rule_find_v1604(lpm, ip_masked, depth);
1262 if (rule_index >= 0) {
1263 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1267 /* If rule is not found return 0. */
1270 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1271 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1272 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
1274 static inline int32_t
1275 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1276 uint8_t *sub_rule_depth)
1282 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1283 ip_masked = ip & depth_to_mask(prev_depth);
1285 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1287 if (rule_index >= 0) {
1288 *sub_rule_depth = prev_depth;
1296 static inline int32_t
1297 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1298 uint8_t *sub_rule_depth)
1304 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1305 ip_masked = ip & depth_to_mask(prev_depth);
1307 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1309 if (rule_index >= 0) {
1310 *sub_rule_depth = prev_depth;
1318 static inline int32_t
1319 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1320 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1322 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1324 /* Calculate the range and index into Table24. */
1325 tbl24_range = depth_to_range(depth);
1326 tbl24_index = (ip_masked >> 8);
1329 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
1330 * and a positive number indicates a sub_rule_index.
1332 if (sub_rule_index < 0) {
1334 * If no replacement rule exists then invalidate entries
1335 * associated with this rule.
1337 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1339 if (lpm->tbl24[i].valid_group == 0 &&
1340 lpm->tbl24[i].depth <= depth) {
1341 lpm->tbl24[i].valid = INVALID;
1342 } else if (lpm->tbl24[i].valid_group == 1) {
1344 * If TBL24 entry is extended, then there has
1345 * to be a rule with depth >= 25 in the
1346 * associated TBL8 group.
1349 tbl8_group_index = lpm->tbl24[i].group_idx;
1350 tbl8_index = tbl8_group_index *
1351 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1353 for (j = tbl8_index; j < (tbl8_index +
1354 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1356 if (lpm->tbl8[j].depth <= depth)
1357 lpm->tbl8[j].valid = INVALID;
1363 * If a replacement rule exists then modify entries
1364 * associated with this rule.
1367 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1368 {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
1371 .depth = sub_rule_depth,
1374 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1376 .valid_group = VALID,
1377 .depth = sub_rule_depth,
1378 .next_hop = lpm->rules_tbl
1379 [sub_rule_index].next_hop,
1382 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1384 if (lpm->tbl24[i].valid_group == 0 &&
1385 lpm->tbl24[i].depth <= depth) {
1386 lpm->tbl24[i] = new_tbl24_entry;
1387 } else if (lpm->tbl24[i].valid_group == 1) {
1389 * If TBL24 entry is extended, then there has
1390 * to be a rule with depth >= 25 in the
1391 * associated TBL8 group.
1394 tbl8_group_index = lpm->tbl24[i].group_idx;
1395 tbl8_index = tbl8_group_index *
1396 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1398 for (j = tbl8_index; j < (tbl8_index +
1399 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1401 if (lpm->tbl8[j].depth <= depth)
1402 lpm->tbl8[j] = new_tbl8_entry;
1411 static inline int32_t
1412 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1413 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1415 #define group_idx next_hop
1416 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1418 /* Calculate the range and index into Table24. */
1419 tbl24_range = depth_to_range(depth);
1420 tbl24_index = (ip_masked >> 8);
1423 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
1424 * and a positive number indicates a sub_rule_index.
1426 if (sub_rule_index < 0) {
1428 * If no replacement rule exists then invalidate entries
1429 * associated with this rule.
1431 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1433 if (lpm->tbl24[i].valid_group == 0 &&
1434 lpm->tbl24[i].depth <= depth) {
1435 lpm->tbl24[i].valid = INVALID;
1436 } else if (lpm->tbl24[i].valid_group == 1) {
1438 * If TBL24 entry is extended, then there has
1439 * to be a rule with depth >= 25 in the
1440 * associated TBL8 group.
1443 tbl8_group_index = lpm->tbl24[i].group_idx;
1444 tbl8_index = tbl8_group_index *
1445 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1447 for (j = tbl8_index; j < (tbl8_index +
1448 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1450 if (lpm->tbl8[j].depth <= depth)
1451 lpm->tbl8[j].valid = INVALID;
1457 * If a replacement rule exists then modify entries
1458 * associated with this rule.
1461 struct rte_lpm_tbl_entry new_tbl24_entry = {
1462 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1465 .depth = sub_rule_depth,
1468 struct rte_lpm_tbl_entry new_tbl8_entry = {
1470 .valid_group = VALID,
1471 .depth = sub_rule_depth,
1472 .next_hop = lpm->rules_tbl
1473 [sub_rule_index].next_hop,
1476 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1478 if (lpm->tbl24[i].valid_group == 0 &&
1479 lpm->tbl24[i].depth <= depth) {
1480 lpm->tbl24[i] = new_tbl24_entry;
1481 } else if (lpm->tbl24[i].valid_group == 1) {
1483 * If TBL24 entry is extended, then there has
1484 * to be a rule with depth >= 25 in the
1485 * associated TBL8 group.
1488 tbl8_group_index = lpm->tbl24[i].group_idx;
1489 tbl8_index = tbl8_group_index *
1490 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1492 for (j = tbl8_index; j < (tbl8_index +
1493 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1495 if (lpm->tbl8[j].depth <= depth)
1496 lpm->tbl8[j] = new_tbl8_entry;
/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
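/*
 * For example, after the last depth > 24 rule in a group is deleted the
 * remaining entries either all mirror one shallower rule copied from tbl24,
 * in which case the group start index is returned so the caller can collapse
 * the group back into a single tbl24 entry, or they are all invalid, in which
 * case -EINVAL is returned and the caller frees the group.
 */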
1513 static inline int32_t
1514 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1515 uint32_t tbl8_group_start)
1517 uint32_t tbl8_group_end, i;
1518 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1521 * Check the first entry of the given tbl8. If it is invalid we know
1522 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
1523 * (As they would affect all entries in a tbl8) and thus this table
1524 * can not be recycled.
1526 if (tbl8[tbl8_group_start].valid) {
1528 * If first entry is valid check if the depth is less than 24
1529 * and if so check the rest of the entries to verify that they
1530 * are all of this depth.
1532 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1533 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1536 if (tbl8[i].depth !=
1537 tbl8[tbl8_group_start].depth) {
			/* If all entries are the same return the tbl8 index */
1543 return tbl8_group_start;
1549 * If the first entry is invalid check if the rest of the entries in
1550 * the tbl8 are invalid.
1552 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1556 /* If no valid entries are found then return -EINVAL. */
1560 static inline int32_t
1561 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1562 uint32_t tbl8_group_start)
1564 uint32_t tbl8_group_end, i;
1565 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1568 * Check the first entry of the given tbl8. If it is invalid we know
1569 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
1570 * (As they would affect all entries in a tbl8) and thus this table
1571 * can not be recycled.
1573 if (tbl8[tbl8_group_start].valid) {
1575 * If first entry is valid check if the depth is less than 24
1576 * and if so check the rest of the entries to verify that they
1577 * are all of this depth.
1579 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1580 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1583 if (tbl8[i].depth !=
1584 tbl8[tbl8_group_start].depth) {
			/* If all entries are the same return the tbl8 index */
1590 return tbl8_group_start;
1596 * If the first entry is invalid check if the rest of the entries in
1597 * the tbl8 are invalid.
1599 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1603 /* If no valid entries are found then return -EINVAL. */
1607 static inline int32_t
1608 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1609 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1611 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1613 int32_t tbl8_recycle_index;
1616 * Calculate the index into tbl24 and range. Note: All depths larger
1617 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1619 tbl24_index = ip_masked >> 8;
1621 /* Calculate the index into tbl8 and range. */
1622 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1623 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1624 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1625 tbl8_range = depth_to_range(depth);
1627 if (sub_rule_index < 0) {
1629 * Loop through the range of entries on tbl8 for which the
1630 * rule_to_delete must be removed or modified.
1632 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1633 if (lpm->tbl8[i].depth <= depth)
1634 lpm->tbl8[i].valid = INVALID;
1637 /* Set new tbl8 entry. */
1638 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1640 .depth = sub_rule_depth,
1641 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1642 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1646 * Loop through the range of entries on tbl8 for which the
1647 * rule_to_delete must be modified.
1649 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1650 if (lpm->tbl8[i].depth <= depth)
1651 lpm->tbl8[i] = new_tbl8_entry;
1656 * Check if there are any valid entries in this tbl8 group. If all
1657 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1658 * associated tbl24 entry.
1661 tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1663 if (tbl8_recycle_index == -EINVAL) {
1664 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1665 lpm->tbl24[tbl24_index].valid = 0;
1666 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1667 } else if (tbl8_recycle_index > -1) {
1668 /* Update tbl24 entry. */
1669 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1670 { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
1673 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1676 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1677 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1678 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1684 static inline int32_t
1685 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1686 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1688 #define group_idx next_hop
1689 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1691 int32_t tbl8_recycle_index;
1694 * Calculate the index into tbl24 and range. Note: All depths larger
1695 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1697 tbl24_index = ip_masked >> 8;
1699 /* Calculate the index into tbl8 and range. */
1700 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1701 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1702 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1703 tbl8_range = depth_to_range(depth);
1705 if (sub_rule_index < 0) {
1707 * Loop through the range of entries on tbl8 for which the
1708 * rule_to_delete must be removed or modified.
1710 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1711 if (lpm->tbl8[i].depth <= depth)
1712 lpm->tbl8[i].valid = INVALID;
1715 /* Set new tbl8 entry. */
1716 struct rte_lpm_tbl_entry new_tbl8_entry = {
1718 .depth = sub_rule_depth,
1719 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1720 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1724 * Loop through the range of entries on tbl8 for which the
1725 * rule_to_delete must be modified.
1727 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1728 if (lpm->tbl8[i].depth <= depth)
1729 lpm->tbl8[i] = new_tbl8_entry;
1734 * Check if there are any valid entries in this tbl8 group. If all
1735 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1736 * associated tbl24 entry.
1739 tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1741 if (tbl8_recycle_index == -EINVAL) {
1742 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1743 lpm->tbl24[tbl24_index].valid = 0;
1744 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1745 } else if (tbl8_recycle_index > -1) {
1746 /* Update tbl24 entry. */
1747 struct rte_lpm_tbl_entry new_tbl24_entry = {
1748 .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1751 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1754 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1755 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1756 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1766 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1768 int32_t rule_to_delete_index, sub_rule_index;
1770 uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP is an unsigned 32-bit value, so it
	 * does not need a range check here.
	 */
1775 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1779 ip_masked = ip & depth_to_mask(depth);
	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
1785 rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1788 * Check if rule_to_delete_index was found. If no rule was found the
1789 * function rule_find returns -EINVAL.
1791 if (rule_to_delete_index < 0)
1794 /* Delete the rule from the rule table. */
1795 rule_delete_v20(lpm, rule_to_delete_index, depth);
1798 * Find rule to replace the rule_to_delete. If there is no rule to
1799 * replace the rule_to_delete we return -1 and invalidate the table
1800 * entries associated with this rule.
1803 sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1806 * If the input depth value is less than 25 use function
1807 * delete_depth_small otherwise use delete_depth_big.
1809 if (depth <= MAX_DEPTH_TBL24) {
1810 return delete_depth_small_v20(lpm, ip_masked, depth,
1811 sub_rule_index, sub_rule_depth);
1812 } else { /* If depth > MAX_DEPTH_TBL24 */
1813 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1817 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1820 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1822 int32_t rule_to_delete_index, sub_rule_index;
1824 uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP is an unsigned 32-bit value, so it
	 * does not need a range check here.
	 */
1829 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1833 ip_masked = ip & depth_to_mask(depth);
	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
1839 rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1842 * Check if rule_to_delete_index was found. If no rule was found the
1843 * function rule_find returns -EINVAL.
1845 if (rule_to_delete_index < 0)
1848 /* Delete the rule from the rule table. */
1849 rule_delete_v1604(lpm, rule_to_delete_index, depth);
1852 * Find rule to replace the rule_to_delete. If there is no rule to
1853 * replace the rule_to_delete we return -1 and invalidate the table
1854 * entries associated with this rule.
1857 sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1860 * If the input depth value is less than 25 use function
1861 * delete_depth_small otherwise use delete_depth_big.
1863 if (depth <= MAX_DEPTH_TBL24) {
1864 return delete_depth_small_v1604(lpm, ip_masked, depth,
1865 sub_rule_index, sub_rule_depth);
1866 } else { /* If depth > MAX_DEPTH_TBL24 */
1867 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1871 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1872 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1873 uint8_t depth), rte_lpm_delete_v1604);
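/*
 * Illustrative delete sequence (a sketch): removing the more specific of two
 * overlapping rules makes lookups fall back to the covering prefix.
 *
 *	rte_lpm_add(lpm, 0x0a000000, 8, 1);	// 10.0.0.0/8   -> next hop 1
 *	rte_lpm_add(lpm, 0x0a0a0000, 16, 2);	// 10.10.0.0/16 -> next hop 2
 *	rte_lpm_delete(lpm, 0x0a0a0000, 16);	// 10.10.x.x now resolves to 1
 */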
1876 * Delete all rules from the LPM table.
1879 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1881 /* Zero rule information. */
1882 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1885 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1888 memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
	/* Delete all rules from the rules table. */
1891 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1893 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1896 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1898 /* Zero rule information. */
1899 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1902 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1905 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1906 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
	/* Delete all rules from the rules table. */
1909 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1911 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1912 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1913 rte_lpm_delete_all_v1604);