/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)		: range = 1 - 32
 * mask   (OUT)		: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}

/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
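
/*
 * Worked example (added for illustration, not part of the original source):
 * depth_to_mask(24) = (int)0x80000000 >> 23 = 0xFFFFFF00, and
 * depth_to_range(24) = 1 << (24 - 24) = 1, i.e. a /24 covers exactly one
 * tbl24 entry. A /16 covers depth_to_range(16) = 1 << 8 = 256 consecutive
 * tbl24 entries, while a /28 covers depth_to_range(28) = 1 << (32 - 28) = 16
 * tbl8 entries.
 */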
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
	struct rte_lpm_v20 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
		rte_lpm_find_existing_v1604);
/*
 * Allocates memory for LPM object
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm_v20 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
	struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
			const struct rte_lpm_config *config), rte_lpm_create_v1604);
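
/*
 * Usage sketch (added for illustration; not part of the original DPDK
 * source). It shows how an application might create a table through the
 * public rte_lpm_create() symbol mapped above. The table name and the
 * sizing values below are arbitrary example choices.
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *			&config);
 *	if (lpm == NULL)
 *		rte_exit(EXIT_FAILURE, "LPM creation failed: %d\n", rte_errno);
 */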
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm);
	rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
		rte_lpm_free_v1604);
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
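/*
 * Illustration (added, not in the original source): with one /8 rule and two
 * /24 rules installed, rule_info[7] (depth 8) has first_rule = 0 and
 * used_rules = 1, while rule_info[23] (depth 24) has first_rule = 1 and
 * used_rules = 2; rules_tbl[0] holds the /8 and rules_tbl[1..2] hold the two
 * /24s, so each depth group occupies a contiguous slice of rules_tbl.
 */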
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
	uint8_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}
static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry_v20 *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
			group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}
static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}
static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}
static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};
			new_tbl24_entry.next_hop = next_hop;

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry_v20
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
					};
					new_tbl8_entry.next_hop = next_hop;

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					lpm->tbl8[j] = new_tbl8_entry;

					continue;
				}
			}
		}
	}

	return 0;
}
static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					lpm->tbl8[j] = new_tbl8_entry;

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}
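
/*
 * Worked example (added for illustration, not part of the original source):
 * for ip = 0xC0A80100 (192.168.1.0) and depth = 24, tbl24_index = ip >> 8 =
 * 0x00C0A801 and tbl24_range = 1, so add_depth_small writes exactly one
 * tbl24 entry; for depth = 16 the same loop covers 256 consecutive tbl24
 * entries (192.168.0.0 .. 192.168.255.255).
 */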
static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				{ .group_idx = (uint8_t)tbl8_group_index, },
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else { /*
		* If it is valid, extended entry calculate the index into tbl8.
		*/
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.valid_group = lpm->tbl8[i].valid_group,
				};
				new_tbl8_entry.next_hop = next_hop;
				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				lpm->tbl8[i] = new_tbl8_entry;

				continue;
			}
		}
	}

	return 0;
}
static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
				.group_idx = tbl8_group_index,
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else { /*
		* If it is valid, extended entry calculate the index into tbl8.
		*/
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				lpm->tbl8[i] = new_tbl8_entry;

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}
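
/*
 * Worked example (added for illustration, not part of the original source):
 * adding 10.1.1.16/28 while the covering tbl24 entry is still invalid
 * allocates a tbl8 group, writes tbl8_range = depth_to_range(28) = 16
 * consecutive tbl8 entries starting at tbl8_index = group * 256 +
 * (ip_masked & 0xFF) = group * 256 + 16, and finally flips the tbl24 entry
 * to an extended entry whose group_idx points at the new group.
 */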
/*
 * Add a rule to the table.
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
		status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v20(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
		status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v1604(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
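
/*
 * Usage sketch (added for illustration; not part of the original DPDK
 * source). It shows how an application might add a route and look it up
 * through the public rte_lpm_add()/rte_lpm_lookup() API; rte_lpm_lookup()
 * itself is defined in rte_lpm.h. The addresses and the next-hop id are
 * arbitrary example values; IPv4 addresses are passed in host byte order.
 *
 *	uint32_t ip = (10 << 24) | (1 << 16);   -- 10.1.0.0
 *	uint32_t next_hop = 0;
 *
 *	if (rte_lpm_add(lpm, ip, 16, 5) < 0)    -- install 10.1.0.0/16 -> 5
 *		rte_exit(EXIT_FAILURE, "rte_lpm_add failed\n");
 *
 *	-- any address inside 10.1.0.0/16 now resolves to next hop 5
 *	if (rte_lpm_lookup(lpm, ip | 42, &next_hop) == 0)
 *		printf("next hop %u\n", next_hop);
 */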
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v20(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v1604(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}
static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
		};
		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}
static inline int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl
			[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}
#undef group_idx
	return 0;
}
/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
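/*
 * Illustration (added, not in the original source): after the last entry of
 * depth > 24 in a group is invalidated the check returns -EINVAL and the
 * group can be freed; if the group ends up holding 256 entries that all carry
 * the same depth <= 24 (e.g. only the covering /24 remains) it returns
 * tbl8_group_start so the caller can fold the group back into a single tbl24
 * entry; any other mix returns -EEXIST and the group stays allocated.
 */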
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
		};

		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}
static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}
/*
 * Deletes a rule
 */
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v20(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v1604(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth), rte_lpm_delete_v1604);
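
/*
 * Illustration (added, not in the original source): if both 10.1.0.0/16 and
 * 10.1.1.0/24 are installed and the /24 is deleted, find_previous_rule
 * locates the /16, so the tbl24/tbl8 entries that the /24 owned are rewritten
 * with the /16's next hop rather than invalidated; deleting the /16 afterwards
 * leaves no covering rule, so its remaining entries are simply marked INVALID.
 */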
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
		rte_lpm_delete_all_v1604);