/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)  : range = 1 - 32
 * mask   (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}
/*
 * Converts a given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* Calculate tbl24 range. (Note: 2^depth = 1 << depth) */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else the depth is greater than 24 and indexes into a tbl8. */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
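
/*
 * Illustrative sketch (not part of the library, kept out of the build): a
 * few worked values for the two helpers above. depth_to_mask() relies on
 * the arithmetic right shift of a negative int replicating the sign bit,
 * so the mask gains one set bit per unit of depth, MSB first, while
 * depth_to_range() gives the number of table entries one rule covers.
 */
#if 0	/* example only, never compiled */
#include <assert.h>

static void
example_depth_helpers(void)
{
	/* depth_to_mask(): one set bit per unit of depth. */
	assert(depth_to_mask(1) == 0x80000000u);
	assert(depth_to_mask(8) == 0xFF000000u);
	assert(depth_to_mask(32) == 0xFFFFFFFFu);

	/* depth_to_range(): entries covered by a rule of that depth. */
	assert(depth_to_range(8) == (1u << 16));	/* 65536 tbl24 entries */
	assert(depth_to_range(24) == 1u);		/* single tbl24 entry */
	assert(depth_to_range(25) == (1u << 7));	/* 128 tbl8 entries */
	assert(depth_to_range(32) == 1u);		/* single tbl8 entry */
}
#endif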
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
	struct rte_lpm_v20 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
		rte_lpm_find_existing_v1604);
/*
 * Allocates memory for LPM object.
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm_v20 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}
	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}
	/* Allocate tailq entry. */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Save user arguments. */
	lpm->max_rules = max_rules;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;
	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}
	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}
	/* Allocate tailq entry. */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;
	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
	struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config), rte_lpm_create_v1604);
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Find our tailq entry. */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm);
	rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Find our tailq entry. */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
		rte_lpm_free_v1604);
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
	uint8_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {
		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {
			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;
				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {
		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {
			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;
				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
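
/*
 * Illustrative sketch (example only, not compiled): how the per-depth
 * bookkeeping used by rule_add_*() partitions rules_tbl. Rules of a given
 * depth live in the contiguous slice [first_rule, first_rule + used_rules),
 * which is exactly the slice rule_find_*() below walks. The helper name is
 * a placeholder, not part of the library.
 */
#if 0	/* example only */
static void
example_walk_rules_at_depth(const struct rte_lpm *lpm, uint8_t depth)
{
	uint32_t first = lpm->rule_info[depth - 1].first_rule;
	uint32_t last = first + lpm->rule_info[depth - 1].used_rules;
	uint32_t i;

	for (i = first; i < last; i++)
		printf("depth %u: ip 0x%08x -> next hop %u\n",
			(unsigned)depth,
			(unsigned)lpm->rules_tbl[i].ip,
			(unsigned)lpm->rules_tbl[i].next_hop);
}
#endif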
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}
static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}
/*
 * Finds a rule in the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry_v20 *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
			group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}
static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}
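
/*
 * Illustrative sketch (example only, not compiled): the mapping used
 * throughout this file between a tbl8 group index, as returned by
 * tbl8_alloc_*(), and the flat tbl8 entry array. Each group holds
 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES consecutive entries and the low byte of
 * the masked IP selects the entry within the group. The helper name is a
 * placeholder.
 */
#if 0	/* example only */
static uint32_t
example_tbl8_entry_index(uint32_t tbl8_group_index, uint32_t ip_masked)
{
	uint32_t tbl8_group_start = tbl8_group_index *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	return tbl8_group_start + (ip_masked & 0xFF);
}
#endif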
static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set
		 * the new entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {
			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};
			new_tbl24_entry.next_hop = next_hop;

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			lpm->tbl24[i] = new_tbl24_entry;
			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry_v20
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
					};
					new_tbl8_entry.next_hop = next_hop;

					/* Setting tbl8 entry in one go to avoid
					 * race conditions.
					 */
					lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}
	return 0;
}
static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set
		 * the new entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {
			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			lpm->tbl24[i] = new_tbl24_entry;
			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/* Setting tbl8 entry in one go to avoid
					 * race conditions.
					 */
					lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}
#undef group_idx
	return 0;
}
static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into tbl8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else {
		/*
		 * If the entry is valid and extended, calculate the index
		 * into tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.valid_group = lpm->tbl8[i].valid_group,
				};
				new_tbl8_entry.next_hop = next_hop;
				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				lpm->tbl8[i] = new_tbl8_entry;
			}
		}
	}

	return 0;
}
static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into tbl8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else {
		/*
		 * If the entry is valid and extended, calculate the index
		 * into tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				lpm->tbl8[i] = new_tbl8_entry;
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Add a rule to the table.
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;
	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * the rule that was added to the rule table.
		 */
		if (status < 0) {
			rule_delete_v20(lpm, rule_index, depth);
			return status;
		}
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;
	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * the rule that was added to the rule table.
		 */
		if (status < 0) {
			rule_delete_v1604(lpm, rule_index, depth);
			return status;
		}
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
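
/*
 * Illustrative usage sketch (example only, not compiled): creating a table,
 * adding a /24 route and resolving an address with rte_lpm_lookup(), which
 * is declared in rte_lpm.h. The "example_*" names and parameter values are
 * placeholders, not part of the library.
 */
#if 0	/* example only */
static void
example_lpm_usage(void)
{
	struct rte_lpm_config example_config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
	};
	struct rte_lpm *example_lpm;
	uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8); /* 192.168.1.0 */
	uint32_t next_hop = 0;

	example_lpm = rte_lpm_create("example_lpm", -1 /* any socket */,
			&example_config);
	if (example_lpm == NULL)
		return;

	/* 192.168.1.0/24 -> next hop id 5 */
	if (rte_lpm_add(example_lpm, ip, 24, 5) == 0 &&
			rte_lpm_lookup(example_lpm, ip | 7, &next_hop) == 0)
		printf("next hop %u\n", (unsigned)next_hop);

	rte_lpm_free(example_lpm);
}
#endif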
/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
			(next_hop == NULL) ||
			(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v20(lpm, ip_masked, depth);
	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
			(next_hop == NULL) ||
			(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v1604(lpm, ip_masked, depth);
	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);
		rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}
	return -1;
}

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);
		rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}
	return -1;
}
static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
		};
		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}
static inline int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}
#undef group_idx
	return 0;
}
/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if the depth is less than
		 * 24 and if so check the rest of the entries to verify that
		 * they are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {
				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth)
					return -EEXIST;
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if the depth is less than
		 * 24 and if so check the rest of the entries to verify that
		 * they are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {
				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth)
					return -EEXIST;
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
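
/*
 * Illustrative sketch (example only, not compiled): the three-way contract
 * of tbl8_recycle_check_*() as consumed by delete_depth_big_*() below.
 * -EEXIST keeps the group, -EINVAL frees it and invalidates the tbl24
 * entry, and a non-negative index means the group can be collapsed back
 * into a single tbl24 entry built from lpm->tbl8[idx].
 */
#if 0	/* example only */
static void
example_recycle_decision(struct rte_lpm *lpm, uint32_t tbl24_index,
		uint32_t tbl8_group_start)
{
	int32_t idx = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (idx == -EINVAL) {
		/* Group is empty: drop the tbl24 pointer, then free it. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (idx > -1) {
		/* All entries identical: delete_depth_big_v1604() rebuilds
		 * the tbl24 entry from lpm->tbl8[idx] and frees the group.
		 */
	}
	/* idx == -EEXIST: the group is still needed, nothing to do. */
}
#endif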
static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
		};
		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}
static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}

/*
 * Deletes a rule.
 */
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;

	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}
	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v20(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;

	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}
	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v1604(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth), rte_lpm_delete_v1604);
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
		rte_lpm_delete_all_v1604);
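
/*
 * Illustrative teardown sketch (example only, not compiled), matching the
 * creation/add example earlier in this file: individual routes are removed
 * with rte_lpm_delete(), the whole table can be flushed with
 * rte_lpm_delete_all(), and rte_lpm_free() releases the memory and the
 * tailq entry. The "example_*" names are placeholders.
 */
#if 0	/* example only */
static void
example_lpm_teardown(struct rte_lpm *example_lpm)
{
	/* Remove a single route previously added with rte_lpm_add(). */
	rte_lpm_delete(example_lpm,
			(192u << 24) | (168u << 16) | (1u << 8), 24);

	/* Or drop every rule and table entry at once. */
	rte_lpm_delete_all(example_lpm);

	rte_lpm_free(example_lpm);
}
#endif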