/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>	/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)		: range = 1 - 32
 * mask   (OUT)		: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}
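
/*
 * Illustrative sketch, not part of the library: expected values of
 * depth_to_mask(), assuming the usual arithmetic right shift that replicates
 * the sign bit. The helper below is hypothetical and kept compiled out; it
 * assumes RTE_VERIFY() from <rte_debug.h> is available.
 */
#if 0
static void
example_depth_to_mask(void)
{
	RTE_VERIFY(depth_to_mask(1) == 0x80000000);  /* only the top bit */
	RTE_VERIFY(depth_to_mask(24) == 0xffffff00); /* a.b.c.0/24 */
	RTE_VERIFY(depth_to_mask(32) == 0xffffffff); /* /32 host route */
}
#endif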

/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
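
/*
 * Illustrative sketch, not part of the library: depth_to_range() returns how
 * many table entries a prefix spans. Depths up to 24 count tbl24 entries;
 * deeper prefixes count entries inside a 256-entry tbl8 group. The compiled
 * out helper below is hypothetical and assumes RTE_VERIFY() is available.
 */
#if 0
static void
example_depth_to_range(void)
{
	RTE_VERIFY(depth_to_range(8) == (1 << 16)); /* /8 spans 65536 tbl24 entries */
	RTE_VERIFY(depth_to_range(24) == 1);        /* /24 spans one tbl24 entry */
	RTE_VERIFY(depth_to_range(25) == 128);      /* /25 spans half a tbl8 group */
	RTE_VERIFY(depth_to_range(32) == 1);        /* /32 spans one tbl8 entry */
}
#endif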

/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
	struct rte_lpm_v20 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);

struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
		rte_lpm_find_existing_v1604);
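
/*
 * Illustrative sketch, not part of the library: looking up a table by name,
 * e.g. from a secondary process. The table name "test_lpm" is hypothetical.
 */
#if 0
static struct rte_lpm *
example_find_existing(void)
{
	struct rte_lpm *lpm = rte_lpm_find_existing("test_lpm");

	if (lpm == NULL) /* rte_errno is set to ENOENT on failure */
		printf("LPM table test_lpm not found\n");

	return lpm;
}
#endif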

/*
 * Allocates memory for LPM object
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm_v20 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);

struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		goto exit;
	}

	lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		goto exit;
	}

	lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
	struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
			const struct rte_lpm_config *config), rte_lpm_create_v1604);
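
/*
 * Illustrative sketch, not part of the library: creating a table through the
 * v1604 configuration structure. The sizes are hypothetical; number_tbl8s
 * bounds how many depth > 24 prefix ranges the table can hold at once.
 */
#if 0
static struct rte_lpm *
example_create(void)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0,
	};

	/* SOCKET_ID_ANY lets the allocator pick any NUMA node. */
	return rte_lpm_create("test_lpm", SOCKET_ID_ANY, &config);
}
#endif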

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);

void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
		rte_lpm_free_v1604);

/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * whenever a group is indexed by depth, because although the depth range is
 * 1 - 32, the groups are stored in the rule table at indexes 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
	uint8_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
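
/*
 * Illustrative note, not from the sources: with rules at depths 8, 8 and 24
 * the rule table is laid out as
 *
 *	index: 0  1  2
 *	depth: 8  8  24
 *
 * giving rule_info[7] = {first_rule = 0, used_rules = 2} and
 * rule_info[23] = {first_rule = 2, used_rules = 1}. Adding a /16 then shifts
 * the depth-24 group up by one slot and stores the new rule at index 2.
 */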

static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

/*
 * Deletes a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry_v20 *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
			group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}
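
/*
 * Illustrative note, not from the sources: tbl8_alloc() is a linear scan for
 * the first group whose leading entry has valid_group == INVALID, so its cost
 * grows with the number of groups already in use. tbl8_free() only clears
 * that flag; the group's entries are zeroed again on the next allocation.
 */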

static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};
			new_tbl24_entry.next_hop = next_hop;

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry_v20
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
					};
					new_tbl8_entry.next_hop = next_hop;

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}

static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
#undef group_idx
}

static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0)
			return tbl8_group_index;

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		if (tbl8_group_index < 0)
			return tbl8_group_index;

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;
			}
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				{ .group_idx = (uint8_t)tbl8_group_index, },
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else { /*
		  * If it is valid, extended entry calculate the index into tbl8.
		  */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.valid_group = lpm->tbl8[i].valid_group,
				};
				new_tbl8_entry.next_hop = next_hop;
				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				lpm->tbl8[i] = new_tbl8_entry;
			}
		}
	}

	return 0;
}

static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0)
			return tbl8_group_index;

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0)
			return tbl8_group_index;

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;
			}
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
				.group_idx = (uint8_t)tbl8_group_index,
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else { /*
		  * If it is valid, extended entry calculate the index into tbl8.
		  */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				lpm->tbl8[i] = new_tbl8_entry;
			}
		}
	}

	return 0;
#undef group_idx
}

/*
 * Add a rule to the rule table.
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0)
		return rule_index;

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
		status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v20(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);

int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0)
		return rule_index;

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
		status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v1604(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
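
/*
 * Illustrative sketch, not part of the library: adding a route and resolving
 * an address inside it. The prefix and next hop IDs are hypothetical;
 * rte_lpm_lookup() is the single-lookup helper declared in rte_lpm.h.
 */
#if 0
static void
example_add_and_lookup(struct rte_lpm *lpm)
{
	uint32_t ip = (10 << 24);	/* 10.0.0.0, host byte order */
	uint32_t next_hop;

	if (rte_lpm_add(lpm, ip, 16, 5) < 0)
		printf("failed to add 10.0.0.0/16\n");

	/* 10.0.1.1 falls inside 10.0.0.0/16, so next_hop becomes 5. */
	if (rte_lpm_lookup(lpm, (10 << 24) | (1 << 8) | 1, &next_hop) == 0)
		printf("next hop %u\n", next_hop);
}
#endif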

/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v20(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);

int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v1604(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);

static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}
1302 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1303 uint8_t *sub_rule_depth)
1309 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1310 ip_masked = ip & depth_to_mask(prev_depth);
1312 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1314 if (rule_index >= 0) {
1315 *sub_rule_depth = prev_depth;

static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
		};
		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}

static inline int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl
			[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
#undef group_idx
}

/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
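
/*
 * Illustrative note, not from the sources: after the last depth > 24 rule in
 * a group is deleted, either every entry was invalidated (no covering rule,
 * recycle via -EINVAL) or every entry was rewritten with a covering depth
 * below MAX_DEPTH_TBL24 (uniform group, recycle via the returned index, whose
 * next_hop and depth are copied back into the owning tbl24 entry).
 */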
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
		};

		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}

static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	}

	return 0;
#undef group_idx
}

/*
 * Deletes a rule
 */
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v20(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);

int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v1604(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth), rte_lpm_delete_v1604);
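
/*
 * Illustrative sketch, not part of the library: removing a route. The same
 * (ip, depth) pair that was passed to rte_lpm_add() must be given; the
 * prefix below is hypothetical.
 */
#if 0
static void
example_delete(struct rte_lpm *lpm)
{
	uint32_t ip = (10 << 24);	/* 10.0.0.0 */

	if (rte_lpm_delete(lpm, ip, 16) < 0)
		printf("10.0.0.0/16 was not present\n");
}
#endif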

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);

void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
		rte_lpm_delete_all_v1604);