 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth (IN)  : range = 1 - 32
 * mask  (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)

	/* To calculate a mask, start with a 1 in the most significant bit
	 * and arithmetic right shift so the left hand side fills with 1's.
	 */
	return (int)0x80000000 >> (depth - 1);
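
/*
 * Illustrative values (editor's note, not part of the original source):
 * the arithmetic shift keeps the sign bit, so the mask grows from the
 * most significant bit, e.g.
 *	depth_to_mask(1)  == 0x80000000	(1 bit set)
 *	depth_to_mask(24) == 0xFFFFFF00	(/24 network mask)
 *	depth_to_mask(32) == 0xFFFFFFFF	(host route)
 */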
/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
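
/*
 * Illustrative values (editor's note, not part of the original source):
 * for depths <= 24 the range is a count of tbl24 entries, for deeper
 * prefixes it is a count of tbl8 entries within one group, e.g.
 *	depth_to_range(16) == 256	(a /16 spans 256 tbl24 entries)
 *	depth_to_range(24) == 1		(a /24 spans exactly 1 tbl24 entry)
 *	depth_to_range(25) == 128	(a /25 spans 128 tbl8 entries)
 *	depth_to_range(32) == 1		(a /32 spans exactly 1 tbl8 entry)
 */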
/*
 * Find an existing lpm table and return a pointer to it.
 */
rte_lpm_find_existing_v20(const char *name)

	struct rte_lpm_v20 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)

	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);

rte_lpm_find_existing_v1604(const char *name)

	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)

	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
		rte_lpm_find_existing_v1604);
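
/*
 * Usage sketch (editor's note, illustrative only): a secondary process can
 * attach by name to an LPM table created by the primary process:
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *	if (lpm == NULL)
 *		rte_panic("LPM table not found\n");
 */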
/*
 * Allocates memory for LPM object
 */
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)

	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm_v20 *lpm = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there is no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);

rte_lpm_create_v1604(const char *name, int socket_id,
		const struct rte_lpm_config *config)

	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there is no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");

	lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");

	lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
	struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config), rte_lpm_create_v1604);
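
/*
 * Usage sketch (editor's note, illustrative only; names and sizes are
 * hypothetical): with the v16.04 API an application sizes the table up
 * front through struct rte_lpm_config and creates it on a NUMA socket:
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *			&config);
 *	if (lpm == NULL)
 *		rte_panic("Cannot create LPM table\n");
 */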
/*
 * Deallocates memory for given LPM table.
 */
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)

	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)

	TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->rules_tbl);

VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);

rte_lpm_free_v1604(struct rte_lpm *lpm)

	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)

	TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->rules_tbl);

BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
		rte_lpm_free_v1604);
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1, etc.). In the code below (depth - 1) is used as
 * the group index because, even though the depth range is 1 - 32, the groups
 * are stored in the rule table at indexes 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
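/*
 * Illustrative layout (editor's note, not part of the original source):
 * rules are kept contiguous and ordered by depth group, and each group is
 * described by rule_info[depth - 1], e.g. with 3 rules at depth 8 and 2
 * rules at depth 24:
 *
 *	rules_tbl:    [ /8 /8 /8 | /24 /24 | unused ... ]
 *	rule_info[7]  = { .first_rule = 0, .used_rules = 3 }
 *	rule_info[23] = { .first_rule = 3, .used_rules = 2 }
 */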
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,

	uint32_t rule_gindex, rule_index, last_rule;

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

		if (rule_index == lpm->max_rules)

		/* Calculate the position in which the rule will be stored. */

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;

		if (rule_index == lpm->max_rules)

		lpm->rule_info[depth - 1].first_rule = rule_index;

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,

	uint32_t rule_gindex, rule_index, last_rule;

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

		if (rule_index == lpm->max_rules)

		/* Calculate the position in which the rule will be stored. */

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;

		if (rule_index == lpm->max_rules)

		lpm->rule_info[depth - 1].first_rule = rule_index;

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;

	lpm->rule_info[depth - 1].used_rules--;

rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;

	lpm->rule_info[depth - 1].used_rules--;

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)

	uint32_t rule_gindex, last_rule, rule_index;

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)

	/* If rule is not found return -EINVAL. */

static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)

	uint32_t rule_gindex, last_rule, rule_index;

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)

	/* If rule is not found return -EINVAL. */

/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)

	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry_v20 *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */

	/* If there are no tbl8 groups free then return error. */

static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)

	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */

	/* If there are no tbl8 groups free then return error. */

tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)

	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;

tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)

	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;

static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,

	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				{ .next_hop = next_hop, },

			/* Setting tbl24 entry in one go to avoid race
			 */
			lpm->tbl24[i] = new_tbl24_entry;

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry_v20
						.valid_group = VALID,
						.next_hop = next_hop,

					/*
					 * Setting tbl8 entry in one go to avoid
					 */
					lpm->tbl8[j] = new_tbl8_entry;
static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,

#define group_idx next_hop
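/*
 * Editor's note: in the v16.04 table entry the same 24-bit field holds either
 * a next hop (leaf entry) or a tbl8 group index (extended tbl24 entry); the
 * macro above lets the code below refer to that field as group_idx when it
 * is used as a tbl8 group number.
 */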
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,

			/* Setting tbl24 entry in one go to avoid race
			 */
			lpm->tbl24[i] = new_tbl24_entry;

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						.valid_group = VALID,
						.next_hop = next_hop,

					/*
					 * Setting tbl8 entry in one go to avoid
					 */
					lpm->tbl8[j] = new_tbl8_entry;
static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,

	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	/*
	 * If the entry is valid and extended, calculate the index into tbl8.
	 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,

				/*
				 * Setting tbl8 entry in one go to avoid race
				 */
				lpm->tbl8[i] = new_tbl8_entry;

static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,

#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	/*
	 * If the entry is valid and extended, calculate the index into tbl8.
	 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,

				/*
				 * Setting tbl8 entry in one go to avoid race
				 */
				lpm->tbl8[i] = new_tbl8_entry;
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,

	int32_t rule_index, status = 0;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0) {

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
			rule_delete_v20(lpm, rule_index, depth);

VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);

rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,

	int32_t rule_index, status = 0;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0) {

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
			rule_delete_v1604(lpm, rule_index, depth);

BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
/*
 * Look for a rule in the high-level rules table.
 */
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,

	/* Check user arguments. */
	if ((lpm == NULL) ||
			(next_hop == NULL) ||
			(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v20(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;

	/* If rule is not found return 0. */

VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);

rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,

	/* Check user arguments. */
	if ((lpm == NULL) ||
			(next_hop == NULL) ||
			(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v1604(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;

	/* If rule is not found return 0. */

BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
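
/*
 * Usage sketch (editor's note, illustrative only; addresses and next hop
 * ids are hypothetical): install a /24 route with next hop id 5 and read
 * it back through the rule table:
 *
 *	uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8);   (192.168.1.0)
 *	uint32_t next_hop = 0;
 *
 *	if (rte_lpm_add(lpm, ip, 24, 5) < 0)
 *		rte_panic("Cannot add LPM rule\n");
 *	if (rte_lpm_is_rule_present(lpm, ip, 24, &next_hop) == 1)
 *		printf("next hop is %u\n", next_hop);
 */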
static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
		uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)

	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * First check sub_rule_index. A value of -1 indicates that no
	 * replacement rule exists; a non-negative value is the index of the
	 * replacement rule.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;

		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
			.depth = sub_rule_depth,

		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl
			[sub_rule_index].next_hop,

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;

static inline int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
		uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)

#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * First check sub_rule_index. A value of -1 indicates that no
	 * replacement rule exists; a non-negative value is the index of the
	 * replacement rule.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;

		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.depth = sub_rule_depth,

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl
			[sub_rule_index].next_hop,

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
		uint32_t tbl8_group_start)

	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;

	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {

	/* If no valid entries are found then return -EINVAL. */

static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)

	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;

	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {

	/* If no valid entries are found then return -EINVAL. */
static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
		uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)

	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;

		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
			.depth = lpm->tbl8[tbl8_recycle_index].depth,

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);

static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
		uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)

#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;

		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)

	int32_t rule_to_delete_index, sub_rule_index;
	uint8_t sub_rule_depth;

	/*
	 * Check input arguments. Note: the IP address is an unsigned 32-bit
	 * integer, so it is always in range and need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 */
	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)

	/* Delete the rule from the rule table. */
	rule_delete_v20(lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If there is no such rule,
	 * find_previous_rule returns -1 and the table entries associated with
	 * this rule are invalidated.
	 */
	sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,

VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);

rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)

	int32_t rule_to_delete_index, sub_rule_index;
	uint8_t sub_rule_depth;

	/*
	 * Check input arguments. Note: the IP address is an unsigned 32-bit
	 * integer, so it is always in range and need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 */
	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)

	/* Delete the rule from the rule table. */
	rule_delete_v1604(lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If there is no such rule,
	 * find_previous_rule returns -1 and the table entries associated with
	 * this rule are invalidated.
	 */
	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,

BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth), rte_lpm_delete_v1604);
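
/*
 * Usage sketch (editor's note, illustrative only): remove the /24 route
 * added in the earlier example; rte_lpm_delete() returns 0 on success and
 * a negative value if the rule is not found or the arguments are invalid:
 *
 *	if (rte_lpm_delete(lpm, ip, 24) < 0)
 *		RTE_LOG(WARNING, LPM, "LPM rule not found\n");
 */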
/*
 * Delete all rules from the LPM table.
 */
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)

	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);

VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);

rte_lpm_delete_all_v1604(struct rte_lpm *lpm)

	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);

BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
		rte_lpm_delete_all_v1604);