/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>	/* memset() */
#include <stdint.h>
#include <errno.h>
#include <stdio.h>	/* snprintf() */
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>	/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
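/*
 * Illustrative note (assumption based on the standard DPDK build options):
 * setting CONFIG_RTE_LIBRTE_LPM_DEBUG=y in the build configuration defines
 * RTE_LIBRTE_LPM_DEBUG, so VERIFY_DEPTH() panics on an out-of-range depth
 * instead of compiling away to nothing.
 */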
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)  : range = 1 - 32
 * mask   (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's.
	 */
	return (int)0x80000000 >> (depth - 1);
}
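/*
 * Worked examples (illustrative): depth_to_mask(1) == 0x80000000,
 * depth_to_mask(24) == 0xFFFFFF00 and depth_to_mask(32) == 0xFFFFFFFF,
 * i.e. the mask keeps the top 'depth' bits of the IPv4 address.
 */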
/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* Calculate tbl24 range. (Note: 2^depth = 1 << depth) */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
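/*
 * Worked examples (illustrative): a /16 prefix spans depth_to_range(16) ==
 * 1 << 8 == 256 tbl24 entries, a /24 spans exactly one tbl24 entry, and a
 * /25 spans 1 << 7 == 128 entries within its tbl8 group.
 */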
120 * Find an existing lpm table and return a pointer to it.
123 rte_lpm_find_existing_v20(const char *name)
125 struct rte_lpm_v20 *l = NULL;
126 struct rte_tailq_entry *te;
127 struct rte_lpm_list *lpm_list;
129 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
131 rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
132 TAILQ_FOREACH(te, lpm_list, next) {
133 l = (struct rte_lpm_v20 *) te->data;
134 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
137 rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
146 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
149 rte_lpm_find_existing_v1604(const char *name)
151 struct rte_lpm *l = NULL;
152 struct rte_tailq_entry *te;
153 struct rte_lpm_list *lpm_list;
155 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
157 rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
158 TAILQ_FOREACH(te, lpm_list, next) {
159 l = (struct rte_lpm *) te->data;
160 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
163 rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
172 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
173 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
174 rte_lpm_find_existing_v1604);
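/*
 * Usage sketch (illustrative, not part of the library): code that did not
 * create a table, e.g. a secondary process, can attach to it by name; NULL
 * is returned if no table with that name exists. The name "example_lpm" is
 * an arbitrary example.
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 */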
177 * Allocates memory for LPM object
180 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
181 __rte_unused int flags)
183 char mem_name[RTE_LPM_NAMESIZE];
184 struct rte_lpm_v20 *lpm = NULL;
185 struct rte_tailq_entry *te;
187 struct rte_lpm_list *lpm_list;
189 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
191 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
193 /* Check user arguments. */
194 if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
199 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
201 /* Determine the amount of memory to allocate. */
202 mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
204 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* Guarantee there is no existing entry with the same name. */
207 TAILQ_FOREACH(te, lpm_list, next) {
208 lpm = (struct rte_lpm_v20 *) te->data;
209 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
218 /* allocate tailq entry */
219 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
221 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
225 /* Allocate memory to store the LPM data structures. */
226 lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
227 RTE_CACHE_LINE_SIZE, socket_id);
229 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
234 /* Save user arguments. */
235 lpm->max_rules = max_rules;
236 snprintf(lpm->name, sizeof(lpm->name), "%s", name);
238 te->data = (void *) lpm;
240 TAILQ_INSERT_TAIL(lpm_list, te, next);
243 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
247 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
250 rte_lpm_create_v1604(const char *name, int socket_id,
251 const struct rte_lpm_config *config)
253 char mem_name[RTE_LPM_NAMESIZE];
254 struct rte_lpm *lpm = NULL;
255 struct rte_tailq_entry *te;
256 uint32_t mem_size, rules_size, tbl8s_size;
257 struct rte_lpm_list *lpm_list;
259 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
261 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
263 /* Check user arguments. */
264 if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
265 || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
270 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
272 /* Determine the amount of memory to allocate. */
273 mem_size = sizeof(*lpm);
274 rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
275 tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
276 RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
278 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* Guarantee there is no existing entry with the same name. */
281 TAILQ_FOREACH(te, lpm_list, next) {
282 lpm = (struct rte_lpm *) te->data;
283 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
292 /* allocate tailq entry */
293 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
295 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
299 /* Allocate memory to store the LPM data structures. */
300 lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
301 RTE_CACHE_LINE_SIZE, socket_id);
303 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
308 lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
309 (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
311 if (lpm->rules_tbl == NULL) {
312 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
319 lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
320 (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
322 if (lpm->tbl8 == NULL) {
323 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
330 /* Save user arguments. */
331 lpm->max_rules = config->max_rules;
332 lpm->number_tbl8s = config->number_tbl8s;
333 snprintf(lpm->name, sizeof(lpm->name), "%s", name);
335 te->data = (void *) lpm;
337 TAILQ_INSERT_TAIL(lpm_list, te, next);
340 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
344 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
346 struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
347 const struct rte_lpm_config *config), rte_lpm_create_v1604);
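/*
 * Usage sketch (illustrative, not part of the library): creating a table
 * through the 16.04 configuration structure. The sizing values below are
 * arbitrary examples; number_tbl8s bounds how many /24 blocks may contain
 * rules deeper than /24.
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *		&config);
 */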
350 * Deallocates memory for given LPM table.
353 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
355 struct rte_lpm_list *lpm_list;
356 struct rte_tailq_entry *te;
358 /* Check user arguments. */
362 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
364 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
366 /* find our tailq entry */
367 TAILQ_FOREACH(te, lpm_list, next) {
368 if (te->data == (void *) lpm)
372 TAILQ_REMOVE(lpm_list, te, next);
374 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
379 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
382 rte_lpm_free_v1604(struct rte_lpm *lpm)
384 struct rte_lpm_list *lpm_list;
385 struct rte_tailq_entry *te;
387 /* Check user arguments. */
391 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
393 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
395 /* find our tailq entry */
396 TAILQ_FOREACH(te, lpm_list, next) {
397 if (te->data == (void *) lpm)
401 TAILQ_REMOVE(lpm_list, te, next);
403 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
405 rte_free(lpm->rules_tbl);
409 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
410 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules
 * that apply to a specific prefix depth (i.e. group 1 contains rules that
 * apply to prefixes with a depth of 1, etc.). In the following code
 * (depth - 1) is used as the group index because, even though the depth
 * range is 1 - 32, the groups are stored in the rule table at indexes
 * 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
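/*
 * Example (illustrative): a rule for 10.0.0.0/8 has depth 8 and therefore
 * belongs to group index 7, so its bookkeeping lives in lpm->rule_info[7]
 * and the rule itself sits in rules_tbl within the slice
 * [first_rule, first_rule + used_rules).
 */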
423 static inline int32_t
424 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
427 uint32_t rule_gindex, rule_index, last_rule;
432 /* Scan through rule group to see if rule already exists. */
433 if (lpm->rule_info[depth - 1].used_rules > 0) {
435 /* rule_gindex stands for rule group index. */
436 rule_gindex = lpm->rule_info[depth - 1].first_rule;
437 /* Initialise rule_index to point to start of rule group. */
438 rule_index = rule_gindex;
439 /* Last rule = Last used rule in this rule group. */
440 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
442 for (; rule_index < last_rule; rule_index++) {
444 /* If rule already exists update its next_hop and return. */
445 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
446 lpm->rules_tbl[rule_index].next_hop = next_hop;
452 if (rule_index == lpm->max_rules)
455 /* Calculate the position in which the rule will be stored. */
458 for (i = depth - 1; i > 0; i--) {
459 if (lpm->rule_info[i - 1].used_rules > 0) {
460 rule_index = lpm->rule_info[i - 1].first_rule
461 + lpm->rule_info[i - 1].used_rules;
465 if (rule_index == lpm->max_rules)
468 lpm->rule_info[depth - 1].first_rule = rule_index;
471 /* Make room for the new rule in the array. */
472 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
473 if (lpm->rule_info[i - 1].first_rule
474 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
477 if (lpm->rule_info[i - 1].used_rules > 0) {
478 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
479 + lpm->rule_info[i - 1].used_rules]
480 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
481 lpm->rule_info[i - 1].first_rule++;
485 /* Add the new rule. */
486 lpm->rules_tbl[rule_index].ip = ip_masked;
487 lpm->rules_tbl[rule_index].next_hop = next_hop;
489 /* Increment the used rules counter for this rule group. */
490 lpm->rule_info[depth - 1].used_rules++;
495 static inline int32_t
496 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
499 uint32_t rule_gindex, rule_index, last_rule;
504 /* Scan through rule group to see if rule already exists. */
505 if (lpm->rule_info[depth - 1].used_rules > 0) {
507 /* rule_gindex stands for rule group index. */
508 rule_gindex = lpm->rule_info[depth - 1].first_rule;
509 /* Initialise rule_index to point to start of rule group. */
510 rule_index = rule_gindex;
511 /* Last rule = Last used rule in this rule group. */
512 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
514 for (; rule_index < last_rule; rule_index++) {
516 /* If rule already exists update its next_hop and return. */
517 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
518 lpm->rules_tbl[rule_index].next_hop = next_hop;
524 if (rule_index == lpm->max_rules)
527 /* Calculate the position in which the rule will be stored. */
530 for (i = depth - 1; i > 0; i--) {
531 if (lpm->rule_info[i - 1].used_rules > 0) {
532 rule_index = lpm->rule_info[i - 1].first_rule
533 + lpm->rule_info[i - 1].used_rules;
537 if (rule_index == lpm->max_rules)
540 lpm->rule_info[depth - 1].first_rule = rule_index;
543 /* Make room for the new rule in the array. */
544 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
545 if (lpm->rule_info[i - 1].first_rule
546 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
549 if (lpm->rule_info[i - 1].used_rules > 0) {
550 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
551 + lpm->rule_info[i - 1].used_rules]
552 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
553 lpm->rule_info[i - 1].first_rule++;
557 /* Add the new rule. */
558 lpm->rules_tbl[rule_index].ip = ip_masked;
559 lpm->rules_tbl[rule_index].next_hop = next_hop;
561 /* Increment the used rules counter for this rule group. */
562 lpm->rule_info[depth - 1].used_rules++;
568 * Delete a rule from the rule table.
569 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
572 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
578 lpm->rules_tbl[rule_index] =
579 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
580 + lpm->rule_info[depth - 1].used_rules - 1];
582 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
583 if (lpm->rule_info[i].used_rules > 0) {
584 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
585 lpm->rules_tbl[lpm->rule_info[i].first_rule
586 + lpm->rule_info[i].used_rules - 1];
587 lpm->rule_info[i].first_rule--;
591 lpm->rule_info[depth - 1].used_rules--;
595 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
601 lpm->rules_tbl[rule_index] =
602 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
603 + lpm->rule_info[depth - 1].used_rules - 1];
605 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
606 if (lpm->rule_info[i].used_rules > 0) {
607 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
608 lpm->rules_tbl[lpm->rule_info[i].first_rule
609 + lpm->rule_info[i].used_rules - 1];
610 lpm->rule_info[i].first_rule--;
614 lpm->rule_info[depth - 1].used_rules--;
618 * Finds a rule in rule table.
619 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
621 static inline int32_t
622 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
624 uint32_t rule_gindex, last_rule, rule_index;
628 rule_gindex = lpm->rule_info[depth - 1].first_rule;
629 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
631 /* Scan used rules at given depth to find rule. */
632 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
633 /* If rule is found return the rule index. */
634 if (lpm->rules_tbl[rule_index].ip == ip_masked)
638 /* If rule is not found return -EINVAL. */
642 static inline int32_t
643 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
645 uint32_t rule_gindex, last_rule, rule_index;
649 rule_gindex = lpm->rule_info[depth - 1].first_rule;
650 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
652 /* Scan used rules at given depth to find rule. */
653 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
654 /* If rule is found return the rule index. */
655 if (lpm->rules_tbl[rule_index].ip == ip_masked)
659 /* If rule is not found return -EINVAL. */
664 * Find, clean and allocate a tbl8.
666 static inline int32_t
667 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
669 uint32_t group_idx; /* tbl8 group index. */
670 struct rte_lpm_tbl_entry_v20 *tbl8_entry;
672 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
673 for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
675 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
676 /* If a free tbl8 group is found clean it and set as VALID. */
677 if (!tbl8_entry->valid_group) {
678 memset(&tbl8_entry[0], 0,
679 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
680 sizeof(tbl8_entry[0]));
682 tbl8_entry->valid_group = VALID;
684 /* Return group index for allocated tbl8 group. */
689 /* If there are no tbl8 groups free then return error. */
693 static inline int32_t
694 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
696 uint32_t group_idx; /* tbl8 group index. */
697 struct rte_lpm_tbl_entry *tbl8_entry;
699 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
700 for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
701 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
702 /* If a free tbl8 group is found clean it and set as VALID. */
703 if (!tbl8_entry->valid_group) {
704 memset(&tbl8_entry[0], 0,
705 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
706 sizeof(tbl8_entry[0]));
708 tbl8_entry->valid_group = VALID;
710 /* Return group index for allocated tbl8 group. */
715 /* If there are no tbl8 groups free then return error. */
720 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
/* Set tbl8 group invalid. */
723 tbl8[tbl8_group_start].valid_group = INVALID;
727 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
/* Set tbl8 group invalid. */
730 tbl8[tbl8_group_start].valid_group = INVALID;
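/*
 * Illustrative layout note: tbl24 holds one entry per /24 block (2^24
 * entries indexed by the top 24 bits of the address). A prefix deeper than
 * /24 extends its covering tbl24 entry with a 256-entry tbl8 group that is
 * indexed by the last byte of the address.
 */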
733 static inline int32_t
734 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
737 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
739 /* Calculate the index into Table24. */
740 tbl24_index = ip >> 8;
741 tbl24_range = depth_to_range(depth);
743 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
745 * For invalid OR valid and non-extended tbl 24 entries set
748 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
749 lpm->tbl24[i].depth <= depth)) {
751 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
756 new_tbl24_entry.next_hop = next_hop;
758 /* Setting tbl24 entry in one go to avoid race
761 lpm->tbl24[i] = new_tbl24_entry;
766 if (lpm->tbl24[i].valid_group == 1) {
767 /* If tbl24 entry is valid and extended calculate the
770 tbl8_index = lpm->tbl24[i].group_idx *
771 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
772 tbl8_group_end = tbl8_index +
773 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
775 for (j = tbl8_index; j < tbl8_group_end; j++) {
776 if (!lpm->tbl8[j].valid ||
777 lpm->tbl8[j].depth <= depth) {
778 struct rte_lpm_tbl_entry_v20
781 .valid_group = VALID,
784 new_tbl8_entry.next_hop = next_hop;
787 * Setting tbl8 entry in one go to avoid
790 lpm->tbl8[j] = new_tbl8_entry;
801 static inline int32_t
802 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
805 #define group_idx next_hop
806 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
808 /* Calculate the index into Table24. */
809 tbl24_index = ip >> 8;
810 tbl24_range = depth_to_range(depth);
812 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
814 * For invalid OR valid and non-extended tbl 24 entries set
817 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
818 lpm->tbl24[i].depth <= depth)) {
820 struct rte_lpm_tbl_entry new_tbl24_entry = {
821 .next_hop = next_hop,
827 /* Setting tbl24 entry in one go to avoid race
830 lpm->tbl24[i] = new_tbl24_entry;
835 if (lpm->tbl24[i].valid_group == 1) {
836 /* If tbl24 entry is valid and extended calculate the
839 tbl8_index = lpm->tbl24[i].group_idx *
840 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
841 tbl8_group_end = tbl8_index +
842 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
844 for (j = tbl8_index; j < tbl8_group_end; j++) {
845 if (!lpm->tbl8[j].valid ||
846 lpm->tbl8[j].depth <= depth) {
847 struct rte_lpm_tbl_entry
850 .valid_group = VALID,
852 .next_hop = next_hop,
856 * Setting tbl8 entry in one go to avoid
859 lpm->tbl8[j] = new_tbl8_entry;
870 static inline int32_t
871 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
874 uint32_t tbl24_index;
875 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
878 tbl24_index = (ip_masked >> 8);
879 tbl8_range = depth_to_range(depth);
881 if (!lpm->tbl24[tbl24_index].valid) {
882 /* Search for a free tbl8 group. */
883 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
885 /* Check tbl8 allocation was successful. */
886 if (tbl8_group_index < 0) {
887 return tbl8_group_index;
890 /* Find index into tbl8 and range. */
891 tbl8_index = (tbl8_group_index *
892 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
895 /* Set tbl8 entry. */
896 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
897 lpm->tbl8[i].depth = depth;
898 lpm->tbl8[i].next_hop = next_hop;
899 lpm->tbl8[i].valid = VALID;
903 * Update tbl24 entry to point to new tbl8 entry. Note: The
904 * ext_flag and tbl8_index need to be updated simultaneously,
905 * so assign whole structure in one go
908 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
909 { .group_idx = (uint8_t)tbl8_group_index, },
915 lpm->tbl24[tbl24_index] = new_tbl24_entry;
} /* If the entry is valid but not extended, calculate the index into tbl8. */
918 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
919 /* Search for free tbl8 group. */
920 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
922 if (tbl8_group_index < 0) {
923 return tbl8_group_index;
926 tbl8_group_start = tbl8_group_index *
927 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
928 tbl8_group_end = tbl8_group_start +
929 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
931 /* Populate new tbl8 with tbl24 value. */
932 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
933 lpm->tbl8[i].valid = VALID;
934 lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
935 lpm->tbl8[i].next_hop =
936 lpm->tbl24[tbl24_index].next_hop;
939 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
941 /* Insert new rule into the tbl8 entry. */
942 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
943 if (!lpm->tbl8[i].valid ||
944 lpm->tbl8[i].depth <= depth) {
945 lpm->tbl8[i].valid = VALID;
946 lpm->tbl8[i].depth = depth;
947 lpm->tbl8[i].next_hop = next_hop;
954 * Update tbl24 entry to point to new tbl8 entry. Note: The
955 * ext_flag and tbl8_index need to be updated simultaneously,
956 * so assign whole structure in one go.
959 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
960 { .group_idx = (uint8_t)tbl8_group_index, },
966 lpm->tbl24[tbl24_index] = new_tbl24_entry;
 * If it is a valid, extended entry, calculate the index into tbl8.
971 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
972 tbl8_group_start = tbl8_group_index *
973 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
974 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
976 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
978 if (!lpm->tbl8[i].valid ||
979 lpm->tbl8[i].depth <= depth) {
980 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
983 .valid_group = lpm->tbl8[i].valid_group,
985 new_tbl8_entry.next_hop = next_hop;
987 * Setting tbl8 entry in one go to avoid race
990 lpm->tbl8[i] = new_tbl8_entry;
1000 static inline int32_t
1001 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
1004 #define group_idx next_hop
1005 uint32_t tbl24_index;
1006 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
1009 tbl24_index = (ip_masked >> 8);
1010 tbl8_range = depth_to_range(depth);
1012 if (!lpm->tbl24[tbl24_index].valid) {
1013 /* Search for a free tbl8 group. */
1014 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1016 /* Check tbl8 allocation was successful. */
1017 if (tbl8_group_index < 0) {
1018 return tbl8_group_index;
1021 /* Find index into tbl8 and range. */
1022 tbl8_index = (tbl8_group_index *
1023 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1026 /* Set tbl8 entry. */
1027 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1028 lpm->tbl8[i].depth = depth;
1029 lpm->tbl8[i].next_hop = next_hop;
1030 lpm->tbl8[i].valid = VALID;
1034 * Update tbl24 entry to point to new tbl8 entry. Note: The
1035 * ext_flag and tbl8_index need to be updated simultaneously,
1036 * so assign whole structure in one go
1039 struct rte_lpm_tbl_entry new_tbl24_entry = {
1040 .group_idx = (uint8_t)tbl8_group_index,
1046 lpm->tbl24[tbl24_index] = new_tbl24_entry;
} /* If the entry is valid but not extended, calculate the index into tbl8. */
1049 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1050 /* Search for free tbl8 group. */
1051 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1053 if (tbl8_group_index < 0) {
1054 return tbl8_group_index;
1057 tbl8_group_start = tbl8_group_index *
1058 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1059 tbl8_group_end = tbl8_group_start +
1060 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1062 /* Populate new tbl8 with tbl24 value. */
1063 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1064 lpm->tbl8[i].valid = VALID;
1065 lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
1066 lpm->tbl8[i].next_hop =
1067 lpm->tbl24[tbl24_index].next_hop;
1070 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1072 /* Insert new rule into the tbl8 entry. */
1073 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1074 if (!lpm->tbl8[i].valid ||
1075 lpm->tbl8[i].depth <= depth) {
1076 lpm->tbl8[i].valid = VALID;
1077 lpm->tbl8[i].depth = depth;
1078 lpm->tbl8[i].next_hop = next_hop;
1085 * Update tbl24 entry to point to new tbl8 entry. Note: The
1086 * ext_flag and tbl8_index need to be updated simultaneously,
1087 * so assign whole structure in one go.
1090 struct rte_lpm_tbl_entry new_tbl24_entry = {
1091 .group_idx = (uint8_t)tbl8_group_index,
1097 lpm->tbl24[tbl24_index] = new_tbl24_entry;
 * If it is a valid, extended entry, calculate the index into tbl8.
1102 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1103 tbl8_group_start = tbl8_group_index *
1104 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1105 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1107 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1109 if (!lpm->tbl8[i].valid ||
1110 lpm->tbl8[i].depth <= depth) {
1111 struct rte_lpm_tbl_entry new_tbl8_entry = {
1114 .next_hop = next_hop,
1115 .valid_group = lpm->tbl8[i].valid_group,
1119 * Setting tbl8 entry in one go to avoid race
1122 lpm->tbl8[i] = new_tbl8_entry;
1136 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1139 int32_t rule_index, status = 0;
1142 /* Check user arguments. */
1143 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1146 ip_masked = ip & depth_to_mask(depth);
1148 /* Add the rule to the rule table. */
1149 rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
/* If there is no space available for the new rule, return an error. */
1152 if (rule_index < 0) {
1156 if (depth <= MAX_DEPTH_TBL24) {
1157 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
} else { /* If depth > MAX_DEPTH_TBL24 */
1159 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
 * If the add fails due to exhaustion of tbl8 extensions, delete the
 * rule that was added to the rule table.
1166 rule_delete_v20(lpm, rule_index, depth);
1174 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1177 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1180 int32_t rule_index, status = 0;
1183 /* Check user arguments. */
1184 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1187 ip_masked = ip & depth_to_mask(depth);
1189 /* Add the rule to the rule table. */
1190 rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
/* If there is no space available for the new rule, return an error. */
1193 if (rule_index < 0) {
1197 if (depth <= MAX_DEPTH_TBL24) {
1198 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
} else { /* If depth > MAX_DEPTH_TBL24 */
1200 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
 * If the add fails due to exhaustion of tbl8 extensions, delete the
 * rule that was added to the rule table.
1207 rule_delete_v1604(lpm, rule_index, depth);
1215 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1216 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1217 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
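/*
 * Usage sketch (illustrative, not part of the library): adding a route and
 * then looking up an address covered by it. Addresses are passed in host
 * byte order; the IPv4() helper from rte_ip.h is assumed here and 100 is an
 * arbitrary next-hop id. rte_lpm_lookup() returns 0 on a hit and fills in
 * the next hop of the longest matching prefix.
 *
 *	uint32_t next_hop;
 *
 *	rte_lpm_add(lpm, IPv4(192, 168, 0, 0), 16, 100);
 *	if (rte_lpm_lookup(lpm, IPv4(192, 168, 10, 1), &next_hop) == 0)
 *		printf("next hop %u\n", next_hop);
 */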
1220 * Look for a rule in the high-level rules table
1223 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1229 /* Check user arguments. */
1230 if ((lpm == NULL) ||
1231 (next_hop == NULL) ||
1232 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1235 /* Look for the rule using rule_find. */
1236 ip_masked = ip & depth_to_mask(depth);
1237 rule_index = rule_find_v20(lpm, ip_masked, depth);
1239 if (rule_index >= 0) {
1240 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1244 /* If rule is not found return 0. */
1247 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1250 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1256 /* Check user arguments. */
1257 if ((lpm == NULL) ||
1258 (next_hop == NULL) ||
1259 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1262 /* Look for the rule using rule_find. */
1263 ip_masked = ip & depth_to_mask(depth);
1264 rule_index = rule_find_v1604(lpm, ip_masked, depth);
1266 if (rule_index >= 0) {
1267 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1271 /* If rule is not found return 0. */
1274 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1275 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1276 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
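/*
 * Usage sketch (illustrative, not part of the library): checking whether an
 * exact rule exists without doing a longest-prefix lookup. The call returns
 * 1 and fills in the next hop when the prefix/depth pair is present, and 0
 * when it is not.
 *
 *	uint32_t hop;
 *
 *	if (rte_lpm_is_rule_present(lpm, IPv4(192, 168, 0, 0), 16, &hop) == 1)
 *		printf("next hop %u\n", hop);
 */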
1278 static inline int32_t
1279 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1280 uint8_t *sub_rule_depth)
1286 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1287 ip_masked = ip & depth_to_mask(prev_depth);
1289 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1291 if (rule_index >= 0) {
1292 *sub_rule_depth = prev_depth;
1300 static inline int32_t
1301 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1302 uint8_t *sub_rule_depth)
1308 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1309 ip_masked = ip & depth_to_mask(prev_depth);
1311 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1313 if (rule_index >= 0) {
1314 *sub_rule_depth = prev_depth;
1322 static inline int32_t
1323 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1324 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1326 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1328 /* Calculate the range and index into Table24. */
1329 tbl24_range = depth_to_range(depth);
1330 tbl24_index = (ip_masked >> 8);
 * First check sub_rule_index. A value of -1 indicates that no replacement
 * rule exists, while a value >= 0 is the index of the replacement rule.
1336 if (sub_rule_index < 0) {
1338 * If no replacement rule exists then invalidate entries
1339 * associated with this rule.
1341 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1343 if (lpm->tbl24[i].valid_group == 0 &&
1344 lpm->tbl24[i].depth <= depth) {
1345 lpm->tbl24[i].valid = INVALID;
1346 } else if (lpm->tbl24[i].valid_group == 1) {
1348 * If TBL24 entry is extended, then there has
1349 * to be a rule with depth >= 25 in the
1350 * associated TBL8 group.
1353 tbl8_group_index = lpm->tbl24[i].group_idx;
1354 tbl8_index = tbl8_group_index *
1355 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1357 for (j = tbl8_index; j < (tbl8_index +
1358 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1360 if (lpm->tbl8[j].depth <= depth)
1361 lpm->tbl8[j].valid = INVALID;
1367 * If a replacement rule exists then modify entries
1368 * associated with this rule.
1371 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1372 {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
1375 .depth = sub_rule_depth,
1378 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1380 .valid_group = VALID,
1381 .depth = sub_rule_depth,
1383 new_tbl8_entry.next_hop =
1384 lpm->rules_tbl[sub_rule_index].next_hop;
1386 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1388 if (lpm->tbl24[i].valid_group == 0 &&
1389 lpm->tbl24[i].depth <= depth) {
1390 lpm->tbl24[i] = new_tbl24_entry;
1391 } else if (lpm->tbl24[i].valid_group == 1) {
1393 * If TBL24 entry is extended, then there has
1394 * to be a rule with depth >= 25 in the
1395 * associated TBL8 group.
1398 tbl8_group_index = lpm->tbl24[i].group_idx;
1399 tbl8_index = tbl8_group_index *
1400 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1402 for (j = tbl8_index; j < (tbl8_index +
1403 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1405 if (lpm->tbl8[j].depth <= depth)
1406 lpm->tbl8[j] = new_tbl8_entry;
1415 static inline int32_t
1416 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1417 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1419 #define group_idx next_hop
1420 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1422 /* Calculate the range and index into Table24. */
1423 tbl24_range = depth_to_range(depth);
1424 tbl24_index = (ip_masked >> 8);
 * First check sub_rule_index. A value of -1 indicates that no replacement
 * rule exists, while a value >= 0 is the index of the replacement rule.
1430 if (sub_rule_index < 0) {
1432 * If no replacement rule exists then invalidate entries
1433 * associated with this rule.
1435 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1437 if (lpm->tbl24[i].valid_group == 0 &&
1438 lpm->tbl24[i].depth <= depth) {
1439 lpm->tbl24[i].valid = INVALID;
1440 } else if (lpm->tbl24[i].valid_group == 1) {
1442 * If TBL24 entry is extended, then there has
1443 * to be a rule with depth >= 25 in the
1444 * associated TBL8 group.
1447 tbl8_group_index = lpm->tbl24[i].group_idx;
1448 tbl8_index = tbl8_group_index *
1449 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1451 for (j = tbl8_index; j < (tbl8_index +
1452 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1454 if (lpm->tbl8[j].depth <= depth)
1455 lpm->tbl8[j].valid = INVALID;
1461 * If a replacement rule exists then modify entries
1462 * associated with this rule.
1465 struct rte_lpm_tbl_entry new_tbl24_entry = {
1466 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1469 .depth = sub_rule_depth,
1472 struct rte_lpm_tbl_entry new_tbl8_entry = {
1474 .valid_group = VALID,
1475 .depth = sub_rule_depth,
1476 .next_hop = lpm->rules_tbl
1477 [sub_rule_index].next_hop,
1480 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1482 if (lpm->tbl24[i].valid_group == 0 &&
1483 lpm->tbl24[i].depth <= depth) {
1484 lpm->tbl24[i] = new_tbl24_entry;
1485 } else if (lpm->tbl24[i].valid_group == 1) {
1487 * If TBL24 entry is extended, then there has
1488 * to be a rule with depth >= 25 in the
1489 * associated TBL8 group.
1492 tbl8_group_index = lpm->tbl24[i].group_idx;
1493 tbl8_index = tbl8_group_index *
1494 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1496 for (j = tbl8_index; j < (tbl8_index +
1497 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1499 if (lpm->tbl8[j].depth <= depth)
1500 lpm->tbl8[j] = new_tbl8_entry;
/*
 * Checks whether a tbl8 group can be recycled.
 *
 * Return of -EEXIST means the tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means the tbl8 is empty and thus can be recycled.
 * Return of a value >= 0 means the tbl8 is in use but all of its entries
 * hold the same value, and thus it can be recycled.
 */
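/*
 * Example (illustrative): once the last rule deeper than /24 that used a
 * tbl8 group has been deleted, the group is either entirely invalid
 * (-EINVAL is returned, the group is freed and the tbl24 entry invalidated)
 * or every entry holds the same shallower route (the group index is
 * returned and that route is folded back into the covering tbl24 entry).
 */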
1517 static inline int32_t
1518 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1519 uint32_t tbl8_group_start)
1521 uint32_t tbl8_group_end, i;
1522 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1525 * Check the first entry of the given tbl8. If it is invalid we know
1526 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
1527 * (As they would affect all entries in a tbl8) and thus this table
1528 * can not be recycled.
1530 if (tbl8[tbl8_group_start].valid) {
1532 * If first entry is valid check if the depth is less than 24
1533 * and if so check the rest of the entries to verify that they
1534 * are all of this depth.
1536 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1537 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1540 if (tbl8[i].depth !=
1541 tbl8[tbl8_group_start].depth) {
/* If all entries are the same, return the tbl8 index. */
1547 return tbl8_group_start;
1553 * If the first entry is invalid check if the rest of the entries in
1554 * the tbl8 are invalid.
1556 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1560 /* If no valid entries are found then return -EINVAL. */
1564 static inline int32_t
1565 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1566 uint32_t tbl8_group_start)
1568 uint32_t tbl8_group_end, i;
1569 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1572 * Check the first entry of the given tbl8. If it is invalid we know
1573 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
1574 * (As they would affect all entries in a tbl8) and thus this table
1575 * can not be recycled.
1577 if (tbl8[tbl8_group_start].valid) {
1579 * If first entry is valid check if the depth is less than 24
1580 * and if so check the rest of the entries to verify that they
1581 * are all of this depth.
1583 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1584 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1587 if (tbl8[i].depth !=
1588 tbl8[tbl8_group_start].depth) {
/* If all entries are the same, return the tbl8 index. */
1594 return tbl8_group_start;
1600 * If the first entry is invalid check if the rest of the entries in
1601 * the tbl8 are invalid.
1603 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1607 /* If no valid entries are found then return -EINVAL. */
1611 static inline int32_t
1612 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1613 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1615 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1617 int32_t tbl8_recycle_index;
1620 * Calculate the index into tbl24 and range. Note: All depths larger
1621 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1623 tbl24_index = ip_masked >> 8;
1625 /* Calculate the index into tbl8 and range. */
1626 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1627 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1628 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1629 tbl8_range = depth_to_range(depth);
1631 if (sub_rule_index < 0) {
1633 * Loop through the range of entries on tbl8 for which the
1634 * rule_to_delete must be removed or modified.
1636 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1637 if (lpm->tbl8[i].depth <= depth)
1638 lpm->tbl8[i].valid = INVALID;
1641 /* Set new tbl8 entry. */
1642 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1644 .depth = sub_rule_depth,
1645 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1648 new_tbl8_entry.next_hop =
1649 lpm->rules_tbl[sub_rule_index].next_hop;
1651 * Loop through the range of entries on tbl8 for which the
1652 * rule_to_delete must be modified.
1654 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1655 if (lpm->tbl8[i].depth <= depth)
1656 lpm->tbl8[i] = new_tbl8_entry;
1661 * Check if there are any valid entries in this tbl8 group. If all
1662 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1663 * associated tbl24 entry.
1666 tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1668 if (tbl8_recycle_index == -EINVAL) {
1669 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1670 lpm->tbl24[tbl24_index].valid = 0;
1671 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1672 } else if (tbl8_recycle_index > -1) {
1673 /* Update tbl24 entry. */
1674 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1675 { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
1678 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1681 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1682 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1683 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1689 static inline int32_t
1690 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1691 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1693 #define group_idx next_hop
1694 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1696 int32_t tbl8_recycle_index;
1699 * Calculate the index into tbl24 and range. Note: All depths larger
1700 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1702 tbl24_index = ip_masked >> 8;
1704 /* Calculate the index into tbl8 and range. */
1705 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1706 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1707 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1708 tbl8_range = depth_to_range(depth);
1710 if (sub_rule_index < 0) {
1712 * Loop through the range of entries on tbl8 for which the
1713 * rule_to_delete must be removed or modified.
1715 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1716 if (lpm->tbl8[i].depth <= depth)
1717 lpm->tbl8[i].valid = INVALID;
1720 /* Set new tbl8 entry. */
1721 struct rte_lpm_tbl_entry new_tbl8_entry = {
1723 .depth = sub_rule_depth,
1724 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1725 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1729 * Loop through the range of entries on tbl8 for which the
1730 * rule_to_delete must be modified.
1732 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1733 if (lpm->tbl8[i].depth <= depth)
1734 lpm->tbl8[i] = new_tbl8_entry;
1739 * Check if there are any valid entries in this tbl8 group. If all
1740 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1741 * associated tbl24 entry.
1744 tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1746 if (tbl8_recycle_index == -EINVAL) {
1747 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1748 lpm->tbl24[tbl24_index].valid = 0;
1749 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1750 } else if (tbl8_recycle_index > -1) {
1751 /* Update tbl24 entry. */
1752 struct rte_lpm_tbl_entry new_tbl24_entry = {
1753 .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1756 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1759 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1760 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1761 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1771 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1773 int32_t rule_to_delete_index, sub_rule_index;
1775 uint8_t sub_rule_depth;
 * Check input arguments. Note: the IP is an unsigned 32-bit value and
 * therefore does not need to be range checked.
1780 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1784 ip_masked = ip & depth_to_mask(depth);
1787 * Find the index of the input rule, that needs to be deleted, in the
1790 rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1793 * Check if rule_to_delete_index was found. If no rule was found the
1794 * function rule_find returns -EINVAL.
1796 if (rule_to_delete_index < 0)
1799 /* Delete the rule from the rule table. */
1800 rule_delete_v20(lpm, rule_to_delete_index, depth);
 * Find a rule to replace the rule_to_delete. If no such rule exists,
 * find_previous_rule returns -1 and the table entries associated with
 * the deleted rule are invalidated.
1808 sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1811 * If the input depth value is less than 25 use function
1812 * delete_depth_small otherwise use delete_depth_big.
1814 if (depth <= MAX_DEPTH_TBL24) {
1815 return delete_depth_small_v20(lpm, ip_masked, depth,
1816 sub_rule_index, sub_rule_depth);
1817 } else { /* If depth > MAX_DEPTH_TBL24 */
1818 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1822 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1825 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1827 int32_t rule_to_delete_index, sub_rule_index;
1829 uint8_t sub_rule_depth;
 * Check input arguments. Note: the IP is an unsigned 32-bit value and
 * therefore does not need to be range checked.
1834 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1838 ip_masked = ip & depth_to_mask(depth);
1841 * Find the index of the input rule, that needs to be deleted, in the
1844 rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1847 * Check if rule_to_delete_index was found. If no rule was found the
1848 * function rule_find returns -EINVAL.
1850 if (rule_to_delete_index < 0)
1853 /* Delete the rule from the rule table. */
1854 rule_delete_v1604(lpm, rule_to_delete_index, depth);
 * Find a rule to replace the rule_to_delete. If no such rule exists,
 * find_previous_rule returns -1 and the table entries associated with
 * the deleted rule are invalidated.
1862 sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1865 * If the input depth value is less than 25 use function
1866 * delete_depth_small otherwise use delete_depth_big.
1868 if (depth <= MAX_DEPTH_TBL24) {
1869 return delete_depth_small_v1604(lpm, ip_masked, depth,
1870 sub_rule_index, sub_rule_depth);
1871 } else { /* If depth > MAX_DEPTH_TBL24 */
1872 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1876 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1877 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1878 uint8_t depth), rte_lpm_delete_v1604);
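/*
 * Usage sketch (illustrative, not part of the library): removing the /16
 * route added in the earlier example. Both the prefix and the depth must
 * match an existing rule exactly, otherwise the call fails.
 *
 *	rte_lpm_delete(lpm, IPv4(192, 168, 0, 0), 16);
 */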
1881 * Delete all rules from the LPM table.
1884 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1886 /* Zero rule information. */
1887 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1890 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1893 memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
/* Delete all rules from the rules table. */
1896 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1898 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1901 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1903 /* Zero rule information. */
1904 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1907 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1910 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1911 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
/* Delete all rules from the rules table. */
1914 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1916 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1917 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1918 rte_lpm_delete_all_v1604);
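/*
 * Usage sketch (illustrative, not part of the library): wiping and then
 * releasing a table once it is no longer needed. rte_lpm_delete_all() drops
 * every rule but keeps the table; rte_lpm_free() releases the table itself.
 *
 *	rte_lpm_delete_all(lpm);
 *	rte_lpm_free(lpm);
 */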