/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)	: range = 1 - 32
 * mask   (OUT)	: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}

/*
 * Converts a given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
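
/*
 * Illustrative values (a sketch, not part of the library), assuming
 * MAX_DEPTH_TBL24 = 24 and RTE_LPM_MAX_DEPTH = 32:
 *
 *	depth_to_mask(16)  == 0xFFFF0000
 *	depth_to_mask(24)  == 0xFFFFFF00
 *	depth_to_range(16) == 1 << 8	(a /16 covers 256 tbl24 entries)
 *	depth_to_range(24) == 1		(a /24 covers a single tbl24 entry)
 *	depth_to_range(28) == 1 << 4	(a /28 covers 16 tbl8 entries)
 */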

/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		l = te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}

/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	rte_mcfg_tailq_write_lock();

	/* Guarantee there's no existing table with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		lpm = NULL;
		rte_errno = EEXIST;
		goto exit;
	}

	/* Allocate tailq entry. */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	te->data = lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_mcfg_tailq_write_unlock();

	return lpm;
}
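
/*
 * Usage sketch (illustrative only, not part of the library): create a table
 * at init time and retrieve it later by name. The table name and sizing
 * values below are arbitrary examples.
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *			&config);
 *	if (lpm == NULL)
 *		rte_exit(EXIT_FAILURE, "LPM creation failed: %d\n", rte_errno);
 *
 *	struct rte_lpm *same = rte_lpm_find_existing("example_lpm");
 */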

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_write_lock();

	/* Find our tailq entry. */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}

/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
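
/*
 * Layout sketch (illustrative only): rules_tbl is kept grouped by depth,
 * shallowest prefixes first, so each rule_info[depth - 1] slice is
 * contiguous. For example, after adding one /8, two /16s and one /24:
 *
 *	rules_tbl:  [ /8 rule | /16 rule | /16 rule | /24 rule ]
 *	rule_info[7]  = { .first_rule = 0, .used_rules = 1 }
 *	rule_info[15] = { .first_rule = 1, .used_rules = 2 }
 *	rule_info[23] = { .first_rule = 3, .used_rules = 1 }
 */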

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

/*
 * Find, clean and allocate a tbl8.
 */
static int32_t
tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.next_hop = 0,
				.valid = INVALID,
				.depth = 0,
				.valid_group = VALID,
			};

			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			__atomic_store(tbl8_entry, &new_tbl8_entry,
					__ATOMIC_RELAXED);

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}
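
/*
 * Indexing sketch (illustrative only): tbl8 is one flat array holding
 * number_tbl8s groups of RTE_LPM_TBL8_GROUP_NUM_ENTRIES (256) entries each.
 * The first entry of group g therefore lives at
 *
 *	tbl8[g * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]
 *
 * and its valid_group flag marks the whole group as allocated or free.
 */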

static void
tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};

	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
			__ATOMIC_RELAXED);
}

static __rte_noinline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set the
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions.
					 */
					__atomic_store(&lpm->tbl8[j],
						&new_tbl8_entry,
						__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}
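
/*
 * Expansion sketch (illustrative only): adding 10.1.0.0/16 with next hop 7
 * writes depth 16 and next_hop 7 into the 256 tbl24 entries covering
 * 10.1.0.0 .. 10.1.255.255 (tbl24 indices 0x0A0100 .. 0x0A01FF), except
 * where an entry already holds a deeper prefix or points to a tbl8 group;
 * in the latter case only the tbl8 entries with a shallower depth are
 * overwritten.
 */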

static __rte_noinline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = lpm->tbl24[tbl24_index].depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = lpm->tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} else {
		/*
		 * If it is valid, extended entry calculate the index into tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
			}
		}
	}
#undef group_idx
	return 0;
}
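
/*
 * Expansion sketch (illustrative only): adding 10.1.1.16/28 fills the 16
 * tbl8 entries covering hosts .16 .. .31 inside the tbl8 group owned by
 * tbl24 entry 0x0A0101; the tbl24 entry itself only stores the group index
 * and the extended (valid_group) flag, never the /28 next hop directly.
 */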

int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* Skip table entries update if the rule is the same as
	 * the rule already in the rules table.
	 */
	if (rule_index == -EEXIST)
		return 0;

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * the rule that was added to the rule table.
		 */
		if (status < 0) {
			rule_delete(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
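
/*
 * Usage sketch (illustrative only): install two overlapping routes and
 * resolve an address; rte_lpm_lookup() (declared in rte_lpm.h) returns the
 * next hop of the longest matching prefix. The addresses and next hop
 * values below are arbitrary examples.
 *
 *	uint32_t nh;
 *
 *	rte_lpm_add(lpm, RTE_IPV4(10, 1, 0, 0), 16, 7);
 *	rte_lpm_add(lpm, RTE_IPV4(10, 1, 1, 0), 24, 9);
 *
 *	if (rte_lpm_lookup(lpm, RTE_IPV4(10, 1, 1, 42), &nh) == 0)
 *		;	// nh == 9, the /24 wins over the /16
 */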

/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}

static int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

static int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl
			[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						__atomic_store(&lpm->tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth)
					return -EEXIST;
			}
			/* If all entries are the same return the tbl8 index. */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		lpm->tbl24[tbl24_index].valid = 0;
		__atomic_thread_fence(__ATOMIC_RELEASE);
		tbl8_free(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_RELEASE);
		tbl8_free(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}

int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete, find_previous_rule returns -1 and the
	 * table entries associated with this rule are invalidated.
	 */
	sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
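
/*
 * Deletion sketch (illustrative only, continuing the add example above):
 * removing the /24 falls back to the covering /16, so lookups in
 * 10.1.1.0/24 resolve to next hop 7 again; removing the /16 as well leaves
 * those addresses with no match and rte_lpm_lookup() returns -ENOENT.
 *
 *	rte_lpm_delete(lpm, RTE_IPV4(10, 1, 1, 0), 24);
 *	rte_lpm_delete(lpm, RTE_IPV4(10, 1, 0, 0), 16);
 */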

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}