1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_common.h>
15 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_malloc.h>
18 #include <rte_eal_memconfig.h>
19 #include <rte_per_lcore.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_rwlock.h>
23 #include <rte_spinlock.h>
24 #include <rte_tailq.h>
25 #include <rte_function_versioning.h>
29 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
31 static struct rte_tailq_elem rte_lpm_tailq = {
34 EAL_REGISTER_TAILQ(rte_lpm_tailq)
36 #define MAX_DEPTH_TBL24 24
43 /* Macro to enable/disable run-time checks. */
44 #if defined(RTE_LIBRTE_LPM_DEBUG)
45 #include <rte_debug.h>
46 #define VERIFY_DEPTH(depth) do { \
47 if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \
48 rte_panic("LPM: Invalid depth (%u) at line %d", \
49 (unsigned)(depth), __LINE__); \
52 #define VERIFY_DEPTH(depth)
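/*
 * Note: VERIFY_DEPTH() expands to nothing unless RTE_LIBRTE_LPM_DEBUG is
 * defined at build time, so these run-time depth checks cost nothing in
 * normal builds.
 */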
56 * Converts a given depth value to its corresponding mask value.
58 * depth (IN) : range = 1 - 32
59 * mask (OUT) : 32bit mask
61 static uint32_t __attribute__((pure))
62 depth_to_mask(uint8_t depth)
66 /* To calculate a mask start with a 1 in the most-significant bit and
67 * arithmetic right shift so that the left hand side fills up with 1's
69 return (int)0x80000000 >> (depth - 1);
73 * Converts a given depth value to its corresponding range value.
75 static uint32_t __attribute__((pure))
76 depth_to_range(uint8_t depth)
81 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
83 if (depth <= MAX_DEPTH_TBL24)
84 return 1 << (MAX_DEPTH_TBL24 - depth);
86 /* Else if depth is greater than 24 */
87 return 1 << (RTE_LPM_MAX_DEPTH - depth);
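/*
 * Worked example for the two helpers above (illustrative values):
 *
 *   depth_to_mask(26)  == (int)0x80000000 >> 25 == 0xFFFFFFC0 (26 one-bits)
 *   depth_to_range(16) == 1 << (24 - 16)        == 256 tbl24 entries
 *   depth_to_range(26) == 1 << (32 - 26)        == 64 tbl8 entries
 */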
91 * Find an existing lpm table and return a pointer to it.
93 struct rte_lpm_v20 * __vsym
94 rte_lpm_find_existing_v20(const char *name)
96 struct rte_lpm_v20 *l = NULL;
97 struct rte_tailq_entry *te;
98 struct rte_lpm_list *lpm_list;
100 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
102 rte_mcfg_tailq_read_lock();
103 TAILQ_FOREACH(te, lpm_list, next) {
105 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
108 rte_mcfg_tailq_read_unlock();
117 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
119 struct rte_lpm * __vsym
120 rte_lpm_find_existing_v1604(const char *name)
122 struct rte_lpm *l = NULL;
123 struct rte_tailq_entry *te;
124 struct rte_lpm_list *lpm_list;
126 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
128 rte_mcfg_tailq_read_lock();
129 TAILQ_FOREACH(te, lpm_list, next) {
131 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
134 rte_mcfg_tailq_read_unlock();
143 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
144 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
145 rte_lpm_find_existing_v1604);
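/*
 * Illustrative use of the lookup-by-name API above (the table name and the
 * error handler are hypothetical):
 *
 *   struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *   if (lpm == NULL)
 *           handle_missing_table();   -- rte_errno is set by the function
 *
 * Because the tailq is registered with the EAL, this is also the usual way
 * for a secondary process to attach to a table created by the primary.
 */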
148 * Allocates memory for LPM object
150 struct rte_lpm_v20 * __vsym
151 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
152 __rte_unused int flags)
154 char mem_name[RTE_LPM_NAMESIZE];
155 struct rte_lpm_v20 *lpm = NULL;
156 struct rte_tailq_entry *te;
158 struct rte_lpm_list *lpm_list;
160 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
162 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
164 /* Check user arguments. */
165 if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
170 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
172 /* Determine the amount of memory to allocate. */
173 mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
175 rte_mcfg_tailq_write_lock();
177 /* guarantee there's no existing entry with the same name */
178 TAILQ_FOREACH(te, lpm_list, next) {
180 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
190 /* allocate tailq entry */
191 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
193 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
198 /* Allocate memory to store the LPM data structures. */
199 lpm = rte_zmalloc_socket(mem_name, mem_size,
200 RTE_CACHE_LINE_SIZE, socket_id);
202 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
208 /* Save user arguments. */
209 lpm->max_rules = max_rules;
210 strlcpy(lpm->name, name, sizeof(lpm->name));
214 TAILQ_INSERT_TAIL(lpm_list, te, next);
217 rte_mcfg_tailq_write_unlock();
221 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
223 struct rte_lpm * __vsym
224 rte_lpm_create_v1604(const char *name, int socket_id,
225 const struct rte_lpm_config *config)
227 char mem_name[RTE_LPM_NAMESIZE];
228 struct rte_lpm *lpm = NULL;
229 struct rte_tailq_entry *te;
230 uint32_t mem_size, rules_size, tbl8s_size;
231 struct rte_lpm_list *lpm_list;
233 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
235 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
237 /* Check user arguments. */
238 if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
239 || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
244 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
246 /* Determine the amount of memory to allocate. */
247 mem_size = sizeof(*lpm);
248 rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
249 tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
250 RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
252 rte_mcfg_tailq_write_lock();
254 /* guarantee there's no existing entry with the same name */
255 TAILQ_FOREACH(te, lpm_list, next) {
257 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
267 /* allocate tailq entry */
268 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
270 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
275 /* Allocate memory to store the LPM data structures. */
276 lpm = rte_zmalloc_socket(mem_name, mem_size,
277 RTE_CACHE_LINE_SIZE, socket_id);
279 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
285 lpm->rules_tbl = rte_zmalloc_socket(NULL,
286 (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
288 if (lpm->rules_tbl == NULL) {
289 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
297 lpm->tbl8 = rte_zmalloc_socket(NULL,
298 (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
300 if (lpm->tbl8 == NULL) {
301 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
302 rte_free(lpm->rules_tbl);
310 /* Save user arguments. */
311 lpm->max_rules = config->max_rules;
312 lpm->number_tbl8s = config->number_tbl8s;
313 strlcpy(lpm->name, name, sizeof(lpm->name));
317 TAILQ_INSERT_TAIL(lpm_list, te, next);
320 rte_mcfg_tailq_write_unlock();
324 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
326 struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
327 const struct rte_lpm_config *config), rte_lpm_create_v1604);
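/*
 * Illustrative sketch only (not part of the library): typical creation of an
 * LPM table through the v16.04 configuration API above. The table name and
 * sizing values below are hypothetical.
 */
static __rte_unused struct rte_lpm *
example_lpm_create(void)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0,
	};

	/* SOCKET_ID_ANY lets the allocator pick any NUMA socket. */
	return rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
}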
330 * Deallocates memory for given LPM table.
333 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
335 struct rte_lpm_list *lpm_list;
336 struct rte_tailq_entry *te;
338 /* Check user arguments. */
342 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
344 rte_mcfg_tailq_write_lock();
346 /* find our tailq entry */
347 TAILQ_FOREACH(te, lpm_list, next) {
348 if (te->data == (void *) lpm)
352 TAILQ_REMOVE(lpm_list, te, next);
354 rte_mcfg_tailq_write_unlock();
359 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
362 rte_lpm_free_v1604(struct rte_lpm *lpm)
364 struct rte_lpm_list *lpm_list;
365 struct rte_tailq_entry *te;
367 /* Check user arguments. */
371 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
373 rte_mcfg_tailq_write_lock();
375 /* find our tailq entry */
376 TAILQ_FOREACH(te, lpm_list, next) {
377 if (te->data == (void *) lpm)
381 TAILQ_REMOVE(lpm_list, te, next);
383 rte_mcfg_tailq_write_unlock();
386 rte_free(lpm->rules_tbl);
390 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
391 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
395 * Adds a rule to the rule table.
397 * NOTE: The rule table is split into 32 groups. Each group contains rules that
398 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
399 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
400 * as the group index because, although the valid depth range is 1 - 32, the
401 * rule groups are indexed from 0 - 31.
402 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
405 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
408 uint32_t rule_gindex, rule_index, last_rule;
413 /* Scan through rule group to see if rule already exists. */
414 if (lpm->rule_info[depth - 1].used_rules > 0) {
416 /* rule_gindex stands for rule group index. */
417 rule_gindex = lpm->rule_info[depth - 1].first_rule;
418 /* Initialise rule_index to point to start of rule group. */
419 rule_index = rule_gindex;
420 /* Last rule = Last used rule in this rule group. */
421 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
423 for (; rule_index < last_rule; rule_index++) {
425 /* If rule already exists update its next_hop and return. */
426 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
427 lpm->rules_tbl[rule_index].next_hop = next_hop;
433 if (rule_index == lpm->max_rules)
436 /* Calculate the position in which the rule will be stored. */
439 for (i = depth - 1; i > 0; i--) {
440 if (lpm->rule_info[i - 1].used_rules > 0) {
441 rule_index = lpm->rule_info[i - 1].first_rule
442 + lpm->rule_info[i - 1].used_rules;
446 if (rule_index == lpm->max_rules)
449 lpm->rule_info[depth - 1].first_rule = rule_index;
452 /* Make room for the new rule in the array. */
453 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
454 if (lpm->rule_info[i - 1].first_rule
455 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
458 if (lpm->rule_info[i - 1].used_rules > 0) {
459 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
460 + lpm->rule_info[i - 1].used_rules]
461 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
462 lpm->rule_info[i - 1].first_rule++;
466 /* Add the new rule. */
467 lpm->rules_tbl[rule_index].ip = ip_masked;
468 lpm->rules_tbl[rule_index].next_hop = next_hop;
470 /* Increment the used rules counter for this rule group. */
471 lpm->rule_info[depth - 1].used_rules++;
477 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
480 uint32_t rule_gindex, rule_index, last_rule;
485 /* Scan through rule group to see if rule already exists. */
486 if (lpm->rule_info[depth - 1].used_rules > 0) {
488 /* rule_gindex stands for rule group index. */
489 rule_gindex = lpm->rule_info[depth - 1].first_rule;
490 /* Initialise rule_index to point to start of rule group. */
491 rule_index = rule_gindex;
492 /* Last rule = Last used rule in this rule group. */
493 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
495 for (; rule_index < last_rule; rule_index++) {
497 /* If rule already exists update its next_hop and return. */
498 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
499 lpm->rules_tbl[rule_index].next_hop = next_hop;
505 if (rule_index == lpm->max_rules)
508 /* Calculate the position in which the rule will be stored. */
511 for (i = depth - 1; i > 0; i--) {
512 if (lpm->rule_info[i - 1].used_rules > 0) {
513 rule_index = lpm->rule_info[i - 1].first_rule
514 + lpm->rule_info[i - 1].used_rules;
518 if (rule_index == lpm->max_rules)
521 lpm->rule_info[depth - 1].first_rule = rule_index;
524 /* Make room for the new rule in the array. */
525 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
526 if (lpm->rule_info[i - 1].first_rule
527 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
530 if (lpm->rule_info[i - 1].used_rules > 0) {
531 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
532 + lpm->rule_info[i - 1].used_rules]
533 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
534 lpm->rule_info[i - 1].first_rule++;
538 /* Add the new rule. */
539 lpm->rules_tbl[rule_index].ip = ip_masked;
540 lpm->rules_tbl[rule_index].next_hop = next_hop;
542 /* Increment the used rules counter for this rule group. */
543 lpm->rule_info[depth - 1].used_rules++;
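/*
 * Illustrative layout maintained by the two rule_add() variants above
 * (the depth values shown are hypothetical). Rules are stored contiguously,
 * grouped by depth with shallower groups first:
 *
 *   rules_tbl: [ /8 rules ][ /16 rules ][ /24 rules ][ /32 rules ]
 *   rule_info[d - 1] = { first_rule, used_rules } for depth d
 *
 * Inserting into a group makes room by moving, for every deeper non-empty
 * group, its first entry to the slot just past its last entry and then
 * incrementing that group's first_rule.
 */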
549 * Delete a rule from the rule table.
550 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
553 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
559 lpm->rules_tbl[rule_index] =
560 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
561 + lpm->rule_info[depth - 1].used_rules - 1];
563 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
564 if (lpm->rule_info[i].used_rules > 0) {
565 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
566 lpm->rules_tbl[lpm->rule_info[i].first_rule
567 + lpm->rule_info[i].used_rules - 1];
568 lpm->rule_info[i].first_rule--;
572 lpm->rule_info[depth - 1].used_rules--;
576 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
582 lpm->rules_tbl[rule_index] =
583 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
584 + lpm->rule_info[depth - 1].used_rules - 1];
586 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
587 if (lpm->rule_info[i].used_rules > 0) {
588 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
589 lpm->rules_tbl[lpm->rule_info[i].first_rule
590 + lpm->rule_info[i].used_rules - 1];
591 lpm->rule_info[i].first_rule--;
595 lpm->rule_info[depth - 1].used_rules--;
599 * Finds a rule in the rule table.
600 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
603 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
605 uint32_t rule_gindex, last_rule, rule_index;
609 rule_gindex = lpm->rule_info[depth - 1].first_rule;
610 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
612 /* Scan used rules at given depth to find rule. */
613 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
614 /* If rule is found return the rule index. */
615 if (lpm->rules_tbl[rule_index].ip == ip_masked)
619 /* If rule is not found return -EINVAL. */
624 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
626 uint32_t rule_gindex, last_rule, rule_index;
630 rule_gindex = lpm->rule_info[depth - 1].first_rule;
631 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
633 /* Scan used rules at given depth to find rule. */
634 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
635 /* If rule is found return the rule index. */
636 if (lpm->rules_tbl[rule_index].ip == ip_masked)
640 /* If rule is not found return -EINVAL. */
645 * Find, clean and allocate a tbl8.
648 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
650 uint32_t group_idx; /* tbl8 group index. */
651 struct rte_lpm_tbl_entry_v20 *tbl8_entry;
653 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
654 for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
656 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
657 /* If a free tbl8 group is found clean it and set as VALID. */
658 if (!tbl8_entry->valid_group) {
659 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
662 .valid_group = VALID,
664 new_tbl8_entry.next_hop = 0;
666 memset(&tbl8_entry[0], 0,
667 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
668 sizeof(tbl8_entry[0]));
670 __atomic_store(tbl8_entry, &new_tbl8_entry,
673 /* Return group index for allocated tbl8 group. */
678 /* If there are no free tbl8 groups then return an error. */
683 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
685 uint32_t group_idx; /* tbl8 group index. */
686 struct rte_lpm_tbl_entry *tbl8_entry;
688 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
689 for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
690 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
691 /* If a free tbl8 group is found clean it and set as VALID. */
692 if (!tbl8_entry->valid_group) {
693 struct rte_lpm_tbl_entry new_tbl8_entry = {
697 .valid_group = VALID,
700 memset(&tbl8_entry[0], 0,
701 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
702 sizeof(tbl8_entry[0]));
704 __atomic_store(tbl8_entry, &new_tbl8_entry,
707 /* Return group index for allocated tbl8 group. */
712 /* If there are no free tbl8 groups then return an error. */
717 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
719 /* Set tbl8 group invalid */
720 struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {
723 .valid_group = INVALID,
725 zero_tbl8_entry.next_hop = 0;
727 __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
732 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
734 /* Set tbl8 group invalid */
735 struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
737 __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
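/*
 * A note on the tbl8 helpers above: entries are handed out in groups of
 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES (256, one per possible value of the last
 * IPv4 octet). An "extended" tbl24 entry stores the group index instead of a
 * next hop, and a group is returned to the free pool simply by clearing
 * valid_group on its first entry.
 */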
741 static __rte_noinline int32_t
742 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
745 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
747 /* Calculate the index into Table24. */
748 tbl24_index = ip >> 8;
749 tbl24_range = depth_to_range(depth);
751 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
753 * For invalid OR valid and non-extended tbl24 entries set
756 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
757 lpm->tbl24[i].depth <= depth)) {
759 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
764 new_tbl24_entry.next_hop = next_hop;
766 /* Setting tbl24 entry in one go to avoid race
769 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
775 if (lpm->tbl24[i].valid_group == 1) {
776 /* If tbl24 entry is valid and extended calculate the
779 tbl8_index = lpm->tbl24[i].group_idx *
780 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
781 tbl8_group_end = tbl8_index +
782 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
784 for (j = tbl8_index; j < tbl8_group_end; j++) {
785 if (!lpm->tbl8[j].valid ||
786 lpm->tbl8[j].depth <= depth) {
787 struct rte_lpm_tbl_entry_v20
790 .valid_group = VALID,
793 new_tbl8_entry.next_hop = next_hop;
796 * Setting tbl8 entry in one go to avoid
799 __atomic_store(&lpm->tbl8[j],
812 static __rte_noinline int32_t
813 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
816 #define group_idx next_hop
817 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
819 /* Calculate the index into Table24. */
820 tbl24_index = ip >> 8;
821 tbl24_range = depth_to_range(depth);
823 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
825 * For invalid OR valid and non-extended tbl24 entries set
828 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
829 lpm->tbl24[i].depth <= depth)) {
831 struct rte_lpm_tbl_entry new_tbl24_entry = {
832 .next_hop = next_hop,
838 /* Setting tbl24 entry in one go to avoid race
841 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
847 if (lpm->tbl24[i].valid_group == 1) {
848 /* If tbl24 entry is valid and extended calculate the
851 tbl8_index = lpm->tbl24[i].group_idx *
852 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
853 tbl8_group_end = tbl8_index +
854 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
856 for (j = tbl8_index; j < tbl8_group_end; j++) {
857 if (!lpm->tbl8[j].valid ||
858 lpm->tbl8[j].depth <= depth) {
859 struct rte_lpm_tbl_entry
862 .valid_group = VALID,
864 .next_hop = next_hop,
868 * Setting tbl8 entry in one go to avoid
871 __atomic_store(&lpm->tbl8[j],
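/*
 * Worked example for add_depth_small() (illustrative prefix): adding
 * 10.1.0.0/16 gives tbl24_index = 0x0A010000 >> 8 = 0x0A0100 and
 * tbl24_range = 1 << (24 - 16) = 256, so the 256 tbl24 entries covering
 * 10.1.0.0 - 10.1.255.255 are rewritten, except where a more specific
 * (deeper) entry already exists; extended entries have their tbl8 entries
 * updated under the same depth rule.
 */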
884 static __rte_noinline int32_t
885 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
888 uint32_t tbl24_index;
889 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
892 tbl24_index = (ip_masked >> 8);
893 tbl8_range = depth_to_range(depth);
895 if (!lpm->tbl24[tbl24_index].valid) {
896 /* Search for a free tbl8 group. */
897 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
899 /* Check tbl8 allocation was successful. */
900 if (tbl8_group_index < 0) {
901 return tbl8_group_index;
904 /* Find index into tbl8 and range. */
905 tbl8_index = (tbl8_group_index *
906 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
909 /* Set tbl8 entry. */
910 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
911 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
914 .valid_group = lpm->tbl8[i].valid_group,
916 new_tbl8_entry.next_hop = next_hop;
917 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
922 * Update tbl24 entry to point to new tbl8 entry. Note: The
923 * ext_flag and tbl8_index need to be updated simultaneously,
924 * so assign whole structure in one go
927 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
928 .group_idx = (uint8_t)tbl8_group_index,
934 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
937 } /* If valid entry but not extended calculate the index into Table8. */
938 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
939 /* Search for free tbl8 group. */
940 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
942 if (tbl8_group_index < 0) {
943 return tbl8_group_index;
946 tbl8_group_start = tbl8_group_index *
947 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
948 tbl8_group_end = tbl8_group_start +
949 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
951 /* Populate new tbl8 with tbl24 value. */
952 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
953 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
955 .depth = lpm->tbl24[tbl24_index].depth,
956 .valid_group = lpm->tbl8[i].valid_group,
958 new_tbl8_entry.next_hop =
959 lpm->tbl24[tbl24_index].next_hop;
960 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
964 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
966 /* Insert new rule into the tbl8 entry. */
967 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
968 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
971 .valid_group = lpm->tbl8[i].valid_group,
973 new_tbl8_entry.next_hop = next_hop;
974 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
979 * Update tbl24 entry to point to new tbl8 entry. Note: The
980 * ext_flag and tbl8_index need to be updated simultaneously,
981 * so assign whole structure in one go.
984 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
985 .group_idx = (uint8_t)tbl8_group_index,
991 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
995 * If it is a valid, extended entry, calculate the index into tbl8.
997 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
998 tbl8_group_start = tbl8_group_index *
999 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1000 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1002 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1004 if (!lpm->tbl8[i].valid ||
1005 lpm->tbl8[i].depth <= depth) {
1006 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1009 .valid_group = lpm->tbl8[i].valid_group,
1011 new_tbl8_entry.next_hop = next_hop;
1013 * Setting tbl8 entry in one go to avoid race
1016 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1027 static __rte_noinline int32_t
1028 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
1031 #define group_idx next_hop
1032 uint32_t tbl24_index;
1033 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
1036 tbl24_index = (ip_masked >> 8);
1037 tbl8_range = depth_to_range(depth);
1039 if (!lpm->tbl24[tbl24_index].valid) {
1040 /* Search for a free tbl8 group. */
1041 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1043 /* Check tbl8 allocation was successful. */
1044 if (tbl8_group_index < 0) {
1045 return tbl8_group_index;
1048 /* Find index into tbl8 and range. */
1049 tbl8_index = (tbl8_group_index *
1050 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1053 /* Set tbl8 entry. */
1054 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1055 struct rte_lpm_tbl_entry new_tbl8_entry = {
1058 .valid_group = lpm->tbl8[i].valid_group,
1059 .next_hop = next_hop,
1061 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1066 * Update tbl24 entry to point to new tbl8 entry. Note: The
1067 * ext_flag and tbl8_index need to be updated simultaneously,
1068 * so assign whole structure in one go
1071 struct rte_lpm_tbl_entry new_tbl24_entry = {
1072 .group_idx = tbl8_group_index,
1078 /* The tbl24 entry must be written only after the
1079 * tbl8 entries are written.
1081 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1084 } /* If valid entry but not extended calculate the index into Table8. */
1085 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1086 /* Search for free tbl8 group. */
1087 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1089 if (tbl8_group_index < 0) {
1090 return tbl8_group_index;
1093 tbl8_group_start = tbl8_group_index *
1094 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1095 tbl8_group_end = tbl8_group_start +
1096 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1098 /* Populate new tbl8 with tbl24 value. */
1099 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1100 struct rte_lpm_tbl_entry new_tbl8_entry = {
1102 .depth = lpm->tbl24[tbl24_index].depth,
1103 .valid_group = lpm->tbl8[i].valid_group,
1104 .next_hop = lpm->tbl24[tbl24_index].next_hop,
1106 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1110 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1112 /* Insert new rule into the tbl8 entry. */
1113 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1114 struct rte_lpm_tbl_entry new_tbl8_entry = {
1117 .valid_group = lpm->tbl8[i].valid_group,
1118 .next_hop = next_hop,
1120 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1125 * Update tbl24 entry to point to new tbl8 entry. Note: The
1126 * ext_flag and tbl8_index need to be updated simultaneously,
1127 * so assign whole structure in one go.
1130 struct rte_lpm_tbl_entry new_tbl24_entry = {
1131 .group_idx = tbl8_group_index,
1137 /* The tbl24 entry must be written only after the
1138 * tbl8 entries are written.
1140 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1144 * If it is a valid, extended entry, calculate the index into tbl8.
1146 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1147 tbl8_group_start = tbl8_group_index *
1148 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1149 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1151 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1153 if (!lpm->tbl8[i].valid ||
1154 lpm->tbl8[i].depth <= depth) {
1155 struct rte_lpm_tbl_entry new_tbl8_entry = {
1158 .next_hop = next_hop,
1159 .valid_group = lpm->tbl8[i].valid_group,
1163 * Setting tbl8 entry in one go to avoid race
1166 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1181 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1184 int32_t rule_index, status = 0;
1187 /* Check user arguments. */
1188 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1191 ip_masked = ip & depth_to_mask(depth);
1193 /* Add the rule to the rule table. */
1194 rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
1196 /* If there is no space available for the new rule return an error. */
1197 if (rule_index < 0) {
1201 if (depth <= MAX_DEPTH_TBL24) {
1202 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
1203 } else { /* If depth > MAX_DEPTH_TBL24 */
1204 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
1207 * If add fails due to exhaustion of tbl8 extensions delete
1208 * the rule that was added to the rule table.
1211 rule_delete_v20(lpm, rule_index, depth);
1219 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1222 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1225 int32_t rule_index, status = 0;
1228 /* Check user arguments. */
1229 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1232 ip_masked = ip & depth_to_mask(depth);
1234 /* Add the rule to the rule table. */
1235 rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
1237 /* If there is no space available for the new rule return an error. */
1238 if (rule_index < 0) {
1242 if (depth <= MAX_DEPTH_TBL24) {
1243 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
1244 } else { /* If depth > MAX_DEPTH_TBL24 */
1245 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
1248 * If add fails due to exhaustion of tbl8 extensions delete
1249 * the rule that was added to the rule table.
1252 rule_delete_v1604(lpm, rule_index, depth);
1260 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1261 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1262 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
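/*
 * Illustrative sketch only (not part of the library): adding a route and
 * looking it up through the public API above. The addresses and next-hop id
 * are hypothetical; rte_lpm_lookup() is the inline helper from rte_lpm.h and
 * takes the IPv4 address in host byte order.
 */
static __rte_unused int
example_lpm_add_and_lookup(struct rte_lpm *lpm)
{
	uint32_t next_hop = 0;
	int ret;

	/* 192.168.0.0/16 (0xC0A80000) -> next hop id 5. */
	ret = rte_lpm_add(lpm, 0xC0A80000, 16, 5);
	if (ret < 0)
		return ret;

	/* Longest-prefix match for 192.168.10.1; expected to find next hop 5. */
	return rte_lpm_lookup(lpm, 0xC0A80A01, &next_hop);
}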
1265 * Look for a rule in the high-level rules table
1268 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1274 /* Check user arguments. */
1275 if ((lpm == NULL) ||
1276 (next_hop == NULL) ||
1277 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1280 /* Look for the rule using rule_find. */
1281 ip_masked = ip & depth_to_mask(depth);
1282 rule_index = rule_find_v20(lpm, ip_masked, depth);
1284 if (rule_index >= 0) {
1285 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1289 /* If rule is not found return 0. */
1292 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1295 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1301 /* Check user arguments. */
1302 if ((lpm == NULL) ||
1303 (next_hop == NULL) ||
1304 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1307 /* Look for the rule using rule_find. */
1308 ip_masked = ip & depth_to_mask(depth);
1309 rule_index = rule_find_v1604(lpm, ip_masked, depth);
1311 if (rule_index >= 0) {
1312 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1316 /* If rule is not found return 0. */
1319 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1320 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1321 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
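/*
 * Note the distinction: rte_lpm_is_rule_present() above performs an exact
 * ip/depth match against the control-plane rules table, whereas
 * rte_lpm_lookup() (in rte_lpm.h) performs a longest-prefix match against
 * the tbl24/tbl8 data-path tables.
 */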
1324 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1325 uint8_t *sub_rule_depth)
1331 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1332 ip_masked = ip & depth_to_mask(prev_depth);
1334 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1336 if (rule_index >= 0) {
1337 *sub_rule_depth = prev_depth;
1346 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1347 uint8_t *sub_rule_depth)
1353 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1354 ip_masked = ip & depth_to_mask(prev_depth);
1356 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1358 if (rule_index >= 0) {
1359 *sub_rule_depth = prev_depth;
1368 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1369 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1371 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1373 /* Calculate the range and index into Table24. */
1374 tbl24_range = depth_to_range(depth);
1375 tbl24_index = (ip_masked >> 8);
1378 * First check the sub_rule_index. A negative value indicates no replacement
1379 * rule exists and a value >= 0 is the index of the replacement rule.
1381 if (sub_rule_index < 0) {
1383 * If no replacement rule exists then invalidate entries
1384 * associated with this rule.
1386 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1388 if (lpm->tbl24[i].valid_group == 0 &&
1389 lpm->tbl24[i].depth <= depth) {
1390 struct rte_lpm_tbl_entry_v20
1391 zero_tbl24_entry = {
1396 zero_tbl24_entry.next_hop = 0;
1397 __atomic_store(&lpm->tbl24[i],
1398 &zero_tbl24_entry, __ATOMIC_RELEASE);
1399 } else if (lpm->tbl24[i].valid_group == 1) {
1401 * If TBL24 entry is extended, then there has
1402 * to be a rule with depth >= 25 in the
1403 * associated TBL8 group.
1406 tbl8_group_index = lpm->tbl24[i].group_idx;
1407 tbl8_index = tbl8_group_index *
1408 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1410 for (j = tbl8_index; j < (tbl8_index +
1411 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1413 if (lpm->tbl8[j].depth <= depth)
1414 lpm->tbl8[j].valid = INVALID;
1420 * If a replacement rule exists then modify entries
1421 * associated with this rule.
1424 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1425 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1428 .depth = sub_rule_depth,
1431 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1433 .valid_group = VALID,
1434 .depth = sub_rule_depth,
1436 new_tbl8_entry.next_hop =
1437 lpm->rules_tbl[sub_rule_index].next_hop;
1439 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1441 if (lpm->tbl24[i].valid_group == 0 &&
1442 lpm->tbl24[i].depth <= depth) {
1443 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
1445 } else if (lpm->tbl24[i].valid_group == 1) {
1447 * If TBL24 entry is extended, then there has
1448 * to be a rule with depth >= 25 in the
1449 * associated TBL8 group.
1452 tbl8_group_index = lpm->tbl24[i].group_idx;
1453 tbl8_index = tbl8_group_index *
1454 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1456 for (j = tbl8_index; j < (tbl8_index +
1457 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1459 if (lpm->tbl8[j].depth <= depth)
1460 __atomic_store(&lpm->tbl8[j],
1472 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1473 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1475 #define group_idx next_hop
1476 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1478 /* Calculate the range and index into Table24. */
1479 tbl24_range = depth_to_range(depth);
1480 tbl24_index = (ip_masked >> 8);
1481 struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
1484 * First check the sub_rule_index. A negative value indicates no replacement
1485 * rule exists and a value >= 0 is the index of the replacement rule.
1487 if (sub_rule_index < 0) {
1489 * If no replacement rule exists then invalidate entries
1490 * associated with this rule.
1492 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1494 if (lpm->tbl24[i].valid_group == 0 &&
1495 lpm->tbl24[i].depth <= depth) {
1496 __atomic_store(&lpm->tbl24[i],
1497 &zero_tbl24_entry, __ATOMIC_RELEASE);
1498 } else if (lpm->tbl24[i].valid_group == 1) {
1500 * If TBL24 entry is extended, then there has
1501 * to be a rule with depth >= 25 in the
1502 * associated TBL8 group.
1505 tbl8_group_index = lpm->tbl24[i].group_idx;
1506 tbl8_index = tbl8_group_index *
1507 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1509 for (j = tbl8_index; j < (tbl8_index +
1510 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1512 if (lpm->tbl8[j].depth <= depth)
1513 lpm->tbl8[j].valid = INVALID;
1519 * If a replacement rule exists then modify entries
1520 * associated with this rule.
1523 struct rte_lpm_tbl_entry new_tbl24_entry = {
1524 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1527 .depth = sub_rule_depth,
1530 struct rte_lpm_tbl_entry new_tbl8_entry = {
1532 .valid_group = VALID,
1533 .depth = sub_rule_depth,
1534 .next_hop = lpm->rules_tbl
1535 [sub_rule_index].next_hop,
1538 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1540 if (lpm->tbl24[i].valid_group == 0 &&
1541 lpm->tbl24[i].depth <= depth) {
1542 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
1544 } else if (lpm->tbl24[i].valid_group == 1) {
1546 * If TBL24 entry is extended, then there has
1547 * to be a rule with depth >= 25 in the
1548 * associated TBL8 group.
1551 tbl8_group_index = lpm->tbl24[i].group_idx;
1552 tbl8_index = tbl8_group_index *
1553 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1555 for (j = tbl8_index; j < (tbl8_index +
1556 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1558 if (lpm->tbl8[j].depth <= depth)
1559 __atomic_store(&lpm->tbl8[j],
1571 * Checks if a tbl8 group can be recycled.
1573 * Return of -EEXIST means the tbl8 is in use and thus can not be recycled.
1574 * Return of -EINVAL means the tbl8 is empty and thus can be recycled.
1575 * Return of a value > -1 means the tbl8 is in use but all its entries share
1576 * the same value and thus it can be recycled.
1579 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1580 uint32_t tbl8_group_start)
1582 uint32_t tbl8_group_end, i;
1583 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1586 * Check the first entry of the given tbl8. If it is invalid we know
1587 * this tbl8 does not contain any rule with a depth of MAX_DEPTH_TBL24 or
1588 * less (as such a rule would cover every entry in the tbl8) and thus this
1589 * table can not be collapsed into a single tbl24 entry.
1591 if (tbl8[tbl8_group_start].valid) {
1593 * If the first entry is valid check if its depth is 24 or less
1594 * and if so check the rest of the entries to verify that they
1595 * are all of this depth.
1597 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1598 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1601 if (tbl8[i].depth !=
1602 tbl8[tbl8_group_start].depth) {
1607 /* If all entries are the same return the tbl8 group start index */
1608 return tbl8_group_start;
1614 * If the first entry is invalid check if the rest of the entries in
1615 * the tbl8 are invalid.
1617 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1621 /* If no valid entries are found then return -EINVAL. */
1626 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1627 uint32_t tbl8_group_start)
1629 uint32_t tbl8_group_end, i;
1630 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1633 * Check the first entry of the given tbl8. If it is invalid we know
1634 * this tbl8 does not contain any rule with a depth of MAX_DEPTH_TBL24 or
1635 * less (as such a rule would cover every entry in the tbl8) and thus this
1636 * table can not be collapsed into a single tbl24 entry.
1638 if (tbl8[tbl8_group_start].valid) {
1640 * If the first entry is valid check if its depth is 24 or less
1641 * and if so check the rest of the entries to verify that they
1642 * are all of this depth.
1644 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1645 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1648 if (tbl8[i].depth !=
1649 tbl8[tbl8_group_start].depth) {
1654 /* If all entries are the same return the tbl8 group start index */
1655 return tbl8_group_start;
1661 * If the first entry is invalid check if the rest of the entries in
1662 * the tbl8 are invalid.
1664 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1668 /* If no valid entries are found then return -EINVAL. */
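/*
 * Recycle-check outcomes, illustrated with hypothetical group contents:
 *
 *   all 256 entries invalid                 -> -EINVAL, the group can be freed
 *   all 256 entries valid, same /16 depth   -> group start index, the group
 *                                              can be collapsed back into its
 *                                              tbl24 entry
 *   a mix of /25 and /32 entries            -> -EEXIST, the group stays in use
 */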
1673 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1674 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1676 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1678 int32_t tbl8_recycle_index;
1681 * Calculate the index into tbl24 and range. Note: All depths larger
1682 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1684 tbl24_index = ip_masked >> 8;
1686 /* Calculate the index into tbl8 and range. */
1687 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1688 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1689 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1690 tbl8_range = depth_to_range(depth);
1692 if (sub_rule_index < 0) {
1694 * Loop through the range of entries on tbl8 for which the
1695 * rule_to_delete must be removed or modified.
1697 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1698 if (lpm->tbl8[i].depth <= depth)
1699 lpm->tbl8[i].valid = INVALID;
1702 /* Set new tbl8 entry. */
1703 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1705 .depth = sub_rule_depth,
1706 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1709 new_tbl8_entry.next_hop =
1710 lpm->rules_tbl[sub_rule_index].next_hop;
1712 * Loop through the range of entries on tbl8 for which the
1713 * rule_to_delete must be modified.
1715 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1716 if (lpm->tbl8[i].depth <= depth)
1717 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1723 * Check if there are any valid entries in this tbl8 group. If all
1724 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1725 * associated tbl24 entry.
1728 tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1730 if (tbl8_recycle_index == -EINVAL) {
1731 /* Set tbl24 before freeing tbl8 to avoid race condition.
1732 * Prevent the free of the tbl8 group from hoisting.
1734 lpm->tbl24[tbl24_index].valid = 0;
1735 __atomic_thread_fence(__ATOMIC_RELEASE);
1736 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1737 } else if (tbl8_recycle_index > -1) {
1738 /* Update tbl24 entry. */
1739 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1740 .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1743 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1746 /* Set tbl24 before freeing tbl8 to avoid race condition.
1747 * Prevent the free of the tbl8 group from hoisting.
1749 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1751 __atomic_thread_fence(__ATOMIC_RELEASE);
1752 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1759 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1760 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1762 #define group_idx next_hop
1763 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1765 int32_t tbl8_recycle_index;
1768 * Calculate the index into tbl24 and range. Note: All depths larger
1769 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1771 tbl24_index = ip_masked >> 8;
1773 /* Calculate the index into tbl8 and range. */
1774 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1775 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1776 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1777 tbl8_range = depth_to_range(depth);
1779 if (sub_rule_index < 0) {
1781 * Loop through the range of entries on tbl8 for which the
1782 * rule_to_delete must be removed or modified.
1784 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1785 if (lpm->tbl8[i].depth <= depth)
1786 lpm->tbl8[i].valid = INVALID;
1789 /* Set new tbl8 entry. */
1790 struct rte_lpm_tbl_entry new_tbl8_entry = {
1792 .depth = sub_rule_depth,
1793 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1794 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1798 * Loop through the range of entries on tbl8 for which the
1799 * rule_to_delete must be modified.
1801 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1802 if (lpm->tbl8[i].depth <= depth)
1803 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1809 * Check if there are any valid entries in this tbl8 group. If all
1810 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1811 * associated tbl24 entry.
1814 tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1816 if (tbl8_recycle_index == -EINVAL) {
1817 /* Set tbl24 before freeing tbl8 to avoid race condition.
1818 * Prevent the free of the tbl8 group from hoisting.
1820 lpm->tbl24[tbl24_index].valid = 0;
1821 __atomic_thread_fence(__ATOMIC_RELEASE);
1822 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1823 } else if (tbl8_recycle_index > -1) {
1824 /* Update tbl24 entry. */
1825 struct rte_lpm_tbl_entry new_tbl24_entry = {
1826 .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1829 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1832 /* Set tbl24 before freeing tbl8 to avoid race condition.
1833 * Prevent the free of the tbl8 group from hoisting.
1835 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1837 __atomic_thread_fence(__ATOMIC_RELEASE);
1838 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1848 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1850 int32_t rule_to_delete_index, sub_rule_index;
1852 uint8_t sub_rule_depth;
1854 * Check input arguments. Note: IP is an unsigned 32-bit integer, so any
1855 * value is valid and it need not be checked.
1857 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1861 ip_masked = ip & depth_to_mask(depth);
1864 * Find the index of the input rule, that needs to be deleted, in the
1867 rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1870 * Check if rule_to_delete_index was found. If no rule was found the
1871 * function rule_find returns -EINVAL.
1873 if (rule_to_delete_index < 0)
1876 /* Delete the rule from the rule table. */
1877 rule_delete_v20(lpm, rule_to_delete_index, depth);
1880 * Find a rule to replace the rule_to_delete. If no such rule exists,
1881 * find_previous_rule returns a negative value and the table entries
1882 * associated with the deleted rule are simply invalidated.
1885 sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1888 * If the input depth value is less than 25 use function
1889 * delete_depth_small otherwise use delete_depth_big.
1891 if (depth <= MAX_DEPTH_TBL24) {
1892 return delete_depth_small_v20(lpm, ip_masked, depth,
1893 sub_rule_index, sub_rule_depth);
1894 } else { /* If depth > MAX_DEPTH_TBL24 */
1895 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1899 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1902 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1904 int32_t rule_to_delete_index, sub_rule_index;
1906 uint8_t sub_rule_depth;
1908 * Check input arguments. Note: IP is an unsigned 32-bit integer, so any
1909 * value is valid and it need not be checked.
1911 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1915 ip_masked = ip & depth_to_mask(depth);
1918 * Find the index of the input rule, that needs to be deleted, in the
1921 rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1924 * Check if rule_to_delete_index was found. If no rule was found the
1925 * function rule_find returns -EINVAL.
1927 if (rule_to_delete_index < 0)
1930 /* Delete the rule from the rule table. */
1931 rule_delete_v1604(lpm, rule_to_delete_index, depth);
1934 * Find a rule to replace the rule_to_delete. If no such rule exists,
1935 * find_previous_rule returns a negative value and the table entries
1936 * associated with the deleted rule are simply invalidated.
1939 sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1942 * If the input depth value is less than 25 use function
1943 * delete_depth_small otherwise use delete_depth_big.
1945 if (depth <= MAX_DEPTH_TBL24) {
1946 return delete_depth_small_v1604(lpm, ip_masked, depth,
1947 sub_rule_index, sub_rule_depth);
1948 } else { /* If depth > MAX_DEPTH_TBL24 */
1949 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1953 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1954 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1955 uint8_t depth), rte_lpm_delete_v1604);
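/*
 * Illustrative teardown sequence using the public API above and below
 * (the table pointer and route are hypothetical):
 *
 *   rte_lpm_delete(lpm, 0xC0A80000, 16);   -- remove a single route
 *   rte_lpm_delete_all(lpm);               -- or drop every rule at once
 *   rte_lpm_free(lpm);                     -- finally release the table
 */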
1958 * Delete all rules from the LPM table.
1961 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1963 /* Zero rule information. */
1964 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1967 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1970 memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1972 /* Delete all rules from the rules table. */
1973 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1975 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1978 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1980 /* Zero rule information. */
1981 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1984 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1987 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1988 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1990 /* Delete all rules from the rules table. */
1991 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1993 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1994 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1995 rte_lpm_delete_all_v1604);