1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_common.h>
15 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_malloc.h>
18 #include <rte_eal_memconfig.h>
19 #include <rte_per_lcore.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_rwlock.h>
23 #include <rte_spinlock.h>
24 #include <rte_tailq.h>
28 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
30 static struct rte_tailq_elem rte_lpm_tailq = {
33 EAL_REGISTER_TAILQ(rte_lpm_tailq)
35 #define MAX_DEPTH_TBL24 24
42 /* Macro to enable/disable run-time checks. */
43 #if defined(RTE_LIBRTE_LPM_DEBUG)
44 #include <rte_debug.h>
45 #define VERIFY_DEPTH(depth) do { \
46 if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \
47 rte_panic("LPM: Invalid depth (%u) at line %d", \
48 (unsigned)(depth), __LINE__); \
51 #define VERIFY_DEPTH(depth)
55 * Converts a given depth value to its corresponding mask value.
57 * depth (IN) : range = 1 - 32
58  * mask (OUT)		: 32-bit mask
60 static uint32_t __attribute__((pure))
61 depth_to_mask(uint8_t depth)
65 	/* To calculate a mask, start with a 1 in the most significant bit and
66 	 * arithmetic right shift so the left hand side fills with 1's
68 return (int)0x80000000 >> (depth - 1);
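/*
 * Worked example (illustrative): for depth = 24 the arithmetic shift above
 * yields 0xFFFFFF00 (a /24 netmask), for depth = 1 it yields 0x80000000,
 * and for depth = 32 it yields 0xFFFFFFFF.
 */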
72  * Converts a given depth value to its corresponding range value.
74 static uint32_t __attribute__((pure))
75 depth_to_range(uint8_t depth)
80 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
82 if (depth <= MAX_DEPTH_TBL24)
83 return 1 << (MAX_DEPTH_TBL24 - depth);
85 /* Else if depth is greater than 24 */
86 return 1 << (RTE_LPM_MAX_DEPTH - depth);
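/*
 * Worked example (illustrative): a /16 prefix covers 1 << (24 - 16) = 256
 * consecutive tbl24 entries, while a /28 prefix covers 1 << (32 - 28) = 16
 * consecutive entries within a single tbl8 group.
 */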
90 * Find an existing lpm table and return a pointer to it.
93 rte_lpm_find_existing_v20(const char *name)
95 struct rte_lpm_v20 *l = NULL;
96 struct rte_tailq_entry *te;
97 struct rte_lpm_list *lpm_list;
99 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
101 rte_mcfg_tailq_read_lock();
102 TAILQ_FOREACH(te, lpm_list, next) {
104 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
107 rte_mcfg_tailq_read_unlock();
116 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
119 rte_lpm_find_existing_v1604(const char *name)
121 struct rte_lpm *l = NULL;
122 struct rte_tailq_entry *te;
123 struct rte_lpm_list *lpm_list;
125 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
127 rte_mcfg_tailq_read_lock();
128 TAILQ_FOREACH(te, lpm_list, next) {
130 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
133 rte_mcfg_tailq_read_unlock();
142 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
143 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
144 rte_lpm_find_existing_v1604);
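/*
 * Usage sketch (illustrative, not part of the library): looking up an LPM
 * table by name. The name "example_lpm" is an assumption for the example.
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *	if (lpm == NULL)
 *		printf("LPM not found, rte_errno = %d\n", rte_errno);
 */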
147 * Allocates memory for LPM object
150 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
151 __rte_unused int flags)
153 char mem_name[RTE_LPM_NAMESIZE];
154 struct rte_lpm_v20 *lpm = NULL;
155 struct rte_tailq_entry *te;
157 struct rte_lpm_list *lpm_list;
159 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
161 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
163 /* Check user arguments. */
164 if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
169 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
171 /* Determine the amount of memory to allocate. */
172 mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
174 rte_mcfg_tailq_write_lock();
176 	/* guarantee there is no existing entry with the same name */
177 TAILQ_FOREACH(te, lpm_list, next) {
179 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
189 /* allocate tailq entry */
190 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
192 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
197 /* Allocate memory to store the LPM data structures. */
198 lpm = rte_zmalloc_socket(mem_name, mem_size,
199 RTE_CACHE_LINE_SIZE, socket_id);
201 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
207 /* Save user arguments. */
208 lpm->max_rules = max_rules;
209 strlcpy(lpm->name, name, sizeof(lpm->name));
213 TAILQ_INSERT_TAIL(lpm_list, te, next);
216 rte_mcfg_tailq_write_unlock();
220 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
223 rte_lpm_create_v1604(const char *name, int socket_id,
224 const struct rte_lpm_config *config)
226 char mem_name[RTE_LPM_NAMESIZE];
227 struct rte_lpm *lpm = NULL;
228 struct rte_tailq_entry *te;
229 uint32_t mem_size, rules_size, tbl8s_size;
230 struct rte_lpm_list *lpm_list;
232 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
234 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
236 /* Check user arguments. */
237 if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
238 || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
243 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
245 /* Determine the amount of memory to allocate. */
246 mem_size = sizeof(*lpm);
247 rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
248 tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
249 RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
251 rte_mcfg_tailq_write_lock();
253 	/* guarantee there is no existing entry with the same name */
254 TAILQ_FOREACH(te, lpm_list, next) {
256 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
266 /* allocate tailq entry */
267 te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
269 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
274 /* Allocate memory to store the LPM data structures. */
275 lpm = rte_zmalloc_socket(mem_name, mem_size,
276 RTE_CACHE_LINE_SIZE, socket_id);
278 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
284 lpm->rules_tbl = rte_zmalloc_socket(NULL,
285 (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
287 if (lpm->rules_tbl == NULL) {
288 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
296 lpm->tbl8 = rte_zmalloc_socket(NULL,
297 (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
299 if (lpm->tbl8 == NULL) {
300 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
301 rte_free(lpm->rules_tbl);
309 /* Save user arguments. */
310 lpm->max_rules = config->max_rules;
311 lpm->number_tbl8s = config->number_tbl8s;
312 strlcpy(lpm->name, name, sizeof(lpm->name));
316 TAILQ_INSERT_TAIL(lpm_list, te, next);
319 rte_mcfg_tailq_write_unlock();
323 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
325 struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
326 const struct rte_lpm_config *config), rte_lpm_create_v1604);
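/*
 * Usage sketch (illustrative, not part of the library): creating a table
 * with the v16.04 configuration structure. The sizes are arbitrary example
 * values.
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *			&config);
 *	if (lpm == NULL)
 *		rte_exit(EXIT_FAILURE, "LPM creation failed\n");
 */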
329 * Deallocates memory for given LPM table.
332 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
334 struct rte_lpm_list *lpm_list;
335 struct rte_tailq_entry *te;
337 /* Check user arguments. */
341 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
343 rte_mcfg_tailq_write_lock();
345 /* find our tailq entry */
346 TAILQ_FOREACH(te, lpm_list, next) {
347 if (te->data == (void *) lpm)
351 TAILQ_REMOVE(lpm_list, te, next);
353 rte_mcfg_tailq_write_unlock();
358 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
361 rte_lpm_free_v1604(struct rte_lpm *lpm)
363 struct rte_lpm_list *lpm_list;
364 struct rte_tailq_entry *te;
366 /* Check user arguments. */
370 lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
372 rte_mcfg_tailq_write_lock();
374 /* find our tailq entry */
375 TAILQ_FOREACH(te, lpm_list, next) {
376 if (te->data == (void *) lpm)
380 TAILQ_REMOVE(lpm_list, te, next);
382 rte_mcfg_tailq_write_unlock();
385 rte_free(lpm->rules_tbl);
389 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
390 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
394 * Adds a rule to the rule table.
396 * NOTE: The rule table is split into 32 groups. Each group contains rules that
397 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
398  * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
399  * as the group index because, although the valid depth range is 1 - 32, the
400  * groups in the rule table are indexed from 0 - 31.
401 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
404 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
407 uint32_t rule_gindex, rule_index, last_rule;
412 /* Scan through rule group to see if rule already exists. */
413 if (lpm->rule_info[depth - 1].used_rules > 0) {
415 /* rule_gindex stands for rule group index. */
416 rule_gindex = lpm->rule_info[depth - 1].first_rule;
417 /* Initialise rule_index to point to start of rule group. */
418 rule_index = rule_gindex;
419 /* Last rule = Last used rule in this rule group. */
420 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
422 for (; rule_index < last_rule; rule_index++) {
424 /* If rule already exists update its next_hop and return. */
425 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
426 lpm->rules_tbl[rule_index].next_hop = next_hop;
432 if (rule_index == lpm->max_rules)
435 /* Calculate the position in which the rule will be stored. */
438 for (i = depth - 1; i > 0; i--) {
439 if (lpm->rule_info[i - 1].used_rules > 0) {
440 rule_index = lpm->rule_info[i - 1].first_rule
441 + lpm->rule_info[i - 1].used_rules;
445 if (rule_index == lpm->max_rules)
448 lpm->rule_info[depth - 1].first_rule = rule_index;
451 /* Make room for the new rule in the array. */
452 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
453 if (lpm->rule_info[i - 1].first_rule
454 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
457 if (lpm->rule_info[i - 1].used_rules > 0) {
458 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
459 + lpm->rule_info[i - 1].used_rules]
460 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
461 lpm->rule_info[i - 1].first_rule++;
465 /* Add the new rule. */
466 lpm->rules_tbl[rule_index].ip = ip_masked;
467 lpm->rules_tbl[rule_index].next_hop = next_hop;
469 /* Increment the used rules counter for this rule group. */
470 lpm->rule_info[depth - 1].used_rules++;
476 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
479 uint32_t rule_gindex, rule_index, last_rule;
484 /* Scan through rule group to see if rule already exists. */
485 if (lpm->rule_info[depth - 1].used_rules > 0) {
487 /* rule_gindex stands for rule group index. */
488 rule_gindex = lpm->rule_info[depth - 1].first_rule;
489 /* Initialise rule_index to point to start of rule group. */
490 rule_index = rule_gindex;
491 /* Last rule = Last used rule in this rule group. */
492 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
494 for (; rule_index < last_rule; rule_index++) {
496 /* If rule already exists update its next_hop and return. */
497 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
498 lpm->rules_tbl[rule_index].next_hop = next_hop;
504 if (rule_index == lpm->max_rules)
507 /* Calculate the position in which the rule will be stored. */
510 for (i = depth - 1; i > 0; i--) {
511 if (lpm->rule_info[i - 1].used_rules > 0) {
512 rule_index = lpm->rule_info[i - 1].first_rule
513 + lpm->rule_info[i - 1].used_rules;
517 if (rule_index == lpm->max_rules)
520 lpm->rule_info[depth - 1].first_rule = rule_index;
523 /* Make room for the new rule in the array. */
524 for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
525 if (lpm->rule_info[i - 1].first_rule
526 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
529 if (lpm->rule_info[i - 1].used_rules > 0) {
530 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
531 + lpm->rule_info[i - 1].used_rules]
532 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
533 lpm->rule_info[i - 1].first_rule++;
537 /* Add the new rule. */
538 lpm->rules_tbl[rule_index].ip = ip_masked;
539 lpm->rules_tbl[rule_index].next_hop = next_hop;
541 /* Increment the used rules counter for this rule group. */
542 lpm->rule_info[depth - 1].used_rules++;
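/*
 * Illustrative note: rule_info[] keeps one {first_rule, used_rules} pair per
 * depth, and the groups are stored back to back in rules_tbl[] in order of
 * increasing depth. With example rules 10.0.0.0/8 and 10.1.0.0/16 installed,
 * the bookkeeping would look like:
 *
 *	rule_info[7]  = { .first_rule = 0, .used_rules = 1 }	(depth 8)
 *	rule_info[15] = { .first_rule = 1, .used_rules = 1 }	(depth 16)
 *
 * The loop above that walks from RTE_LPM_MAX_DEPTH down to the new depth
 * shifts each deeper group up by one slot to make room for the new rule.
 */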
548 * Delete a rule from the rule table.
549 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
552 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
558 lpm->rules_tbl[rule_index] =
559 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
560 + lpm->rule_info[depth - 1].used_rules - 1];
562 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
563 if (lpm->rule_info[i].used_rules > 0) {
564 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
565 lpm->rules_tbl[lpm->rule_info[i].first_rule
566 + lpm->rule_info[i].used_rules - 1];
567 lpm->rule_info[i].first_rule--;
571 lpm->rule_info[depth - 1].used_rules--;
575 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
581 lpm->rules_tbl[rule_index] =
582 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
583 + lpm->rule_info[depth - 1].used_rules - 1];
585 for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
586 if (lpm->rule_info[i].used_rules > 0) {
587 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
588 lpm->rules_tbl[lpm->rule_info[i].first_rule
589 + lpm->rule_info[i].used_rules - 1];
590 lpm->rule_info[i].first_rule--;
594 lpm->rule_info[depth - 1].used_rules--;
598 * Finds a rule in rule table.
599 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
602 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
604 uint32_t rule_gindex, last_rule, rule_index;
608 rule_gindex = lpm->rule_info[depth - 1].first_rule;
609 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
611 /* Scan used rules at given depth to find rule. */
612 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
613 /* If rule is found return the rule index. */
614 if (lpm->rules_tbl[rule_index].ip == ip_masked)
618 /* If rule is not found return -EINVAL. */
623 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
625 uint32_t rule_gindex, last_rule, rule_index;
629 rule_gindex = lpm->rule_info[depth - 1].first_rule;
630 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
632 /* Scan used rules at given depth to find rule. */
633 for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
634 /* If rule is found return the rule index. */
635 if (lpm->rules_tbl[rule_index].ip == ip_masked)
639 /* If rule is not found return -EINVAL. */
644 * Find, clean and allocate a tbl8.
647 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
649 uint32_t group_idx; /* tbl8 group index. */
650 struct rte_lpm_tbl_entry_v20 *tbl8_entry;
652 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
653 for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
655 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
656 /* If a free tbl8 group is found clean it and set as VALID. */
657 if (!tbl8_entry->valid_group) {
658 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
661 .valid_group = VALID,
663 new_tbl8_entry.next_hop = 0;
665 memset(&tbl8_entry[0], 0,
666 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
667 sizeof(tbl8_entry[0]));
669 __atomic_store(tbl8_entry, &new_tbl8_entry,
672 /* Return group index for allocated tbl8 group. */
677 	/* If there are no free tbl8 groups then return an error. */
682 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
684 uint32_t group_idx; /* tbl8 group index. */
685 struct rte_lpm_tbl_entry *tbl8_entry;
687 /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
688 for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
689 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
690 /* If a free tbl8 group is found clean it and set as VALID. */
691 if (!tbl8_entry->valid_group) {
692 struct rte_lpm_tbl_entry new_tbl8_entry = {
696 .valid_group = VALID,
699 memset(&tbl8_entry[0], 0,
700 RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
701 sizeof(tbl8_entry[0]));
703 __atomic_store(tbl8_entry, &new_tbl8_entry,
706 /* Return group index for allocated tbl8 group. */
711 	/* If there are no free tbl8 groups then return an error. */
716 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
718 	/* Set tbl8 group invalid. */
719 struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {
722 .valid_group = INVALID,
724 zero_tbl8_entry.next_hop = 0;
726 __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
731 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
733 	/* Set tbl8 group invalid. */
734 struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
736 __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
740 static __rte_noinline int32_t
741 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
744 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
746 /* Calculate the index into Table24. */
747 tbl24_index = ip >> 8;
748 tbl24_range = depth_to_range(depth);
750 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
752 		 * For invalid OR valid and non-extended tbl24 entries set
755 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
756 lpm->tbl24[i].depth <= depth)) {
758 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
763 new_tbl24_entry.next_hop = next_hop;
765 /* Setting tbl24 entry in one go to avoid race
768 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
774 if (lpm->tbl24[i].valid_group == 1) {
775 /* If tbl24 entry is valid and extended calculate the
778 tbl8_index = lpm->tbl24[i].group_idx *
779 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
780 tbl8_group_end = tbl8_index +
781 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
783 for (j = tbl8_index; j < tbl8_group_end; j++) {
784 if (!lpm->tbl8[j].valid ||
785 lpm->tbl8[j].depth <= depth) {
786 struct rte_lpm_tbl_entry_v20
789 .valid_group = VALID,
792 new_tbl8_entry.next_hop = next_hop;
795 * Setting tbl8 entry in one go to avoid
798 __atomic_store(&lpm->tbl8[j],
811 static __rte_noinline int32_t
812 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
815 #define group_idx next_hop
816 uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
818 /* Calculate the index into Table24. */
819 tbl24_index = ip >> 8;
820 tbl24_range = depth_to_range(depth);
822 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
824 		 * For invalid OR valid and non-extended tbl24 entries set
827 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
828 lpm->tbl24[i].depth <= depth)) {
830 struct rte_lpm_tbl_entry new_tbl24_entry = {
831 .next_hop = next_hop,
837 /* Setting tbl24 entry in one go to avoid race
840 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
846 if (lpm->tbl24[i].valid_group == 1) {
847 /* If tbl24 entry is valid and extended calculate the
850 tbl8_index = lpm->tbl24[i].group_idx *
851 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
852 tbl8_group_end = tbl8_index +
853 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
855 for (j = tbl8_index; j < tbl8_group_end; j++) {
856 if (!lpm->tbl8[j].valid ||
857 lpm->tbl8[j].depth <= depth) {
858 struct rte_lpm_tbl_entry
861 .valid_group = VALID,
863 .next_hop = next_hop,
867 * Setting tbl8 entry in one go to avoid
870 __atomic_store(&lpm->tbl8[j],
883 static __rte_noinline int32_t
884 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
887 uint32_t tbl24_index;
888 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
891 tbl24_index = (ip_masked >> 8);
892 tbl8_range = depth_to_range(depth);
894 if (!lpm->tbl24[tbl24_index].valid) {
895 /* Search for a free tbl8 group. */
896 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
898 /* Check tbl8 allocation was successful. */
899 if (tbl8_group_index < 0) {
900 return tbl8_group_index;
903 /* Find index into tbl8 and range. */
904 tbl8_index = (tbl8_group_index *
905 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
908 /* Set tbl8 entry. */
909 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
910 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
913 .valid_group = lpm->tbl8[i].valid_group,
915 new_tbl8_entry.next_hop = next_hop;
916 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
921 * Update tbl24 entry to point to new tbl8 entry. Note: The
922 * ext_flag and tbl8_index need to be updated simultaneously,
923 * so assign whole structure in one go
926 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
927 .group_idx = (uint8_t)tbl8_group_index,
933 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
936 } /* If valid entry but not extended calculate the index into Table8. */
937 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
938 /* Search for free tbl8 group. */
939 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
941 if (tbl8_group_index < 0) {
942 return tbl8_group_index;
945 tbl8_group_start = tbl8_group_index *
946 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
947 tbl8_group_end = tbl8_group_start +
948 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
950 /* Populate new tbl8 with tbl24 value. */
951 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
952 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
954 .depth = lpm->tbl24[tbl24_index].depth,
955 .valid_group = lpm->tbl8[i].valid_group,
957 new_tbl8_entry.next_hop =
958 lpm->tbl24[tbl24_index].next_hop;
959 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
963 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
965 /* Insert new rule into the tbl8 entry. */
966 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
967 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
970 .valid_group = lpm->tbl8[i].valid_group,
972 new_tbl8_entry.next_hop = next_hop;
973 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
978 * Update tbl24 entry to point to new tbl8 entry. Note: The
979 * ext_flag and tbl8_index need to be updated simultaneously,
980 * so assign whole structure in one go.
983 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
984 .group_idx = (uint8_t)tbl8_group_index,
990 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
994 	 * If it is a valid, extended entry, calculate the index into tbl8.
996 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
997 tbl8_group_start = tbl8_group_index *
998 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
999 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1001 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1003 if (!lpm->tbl8[i].valid ||
1004 lpm->tbl8[i].depth <= depth) {
1005 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1008 .valid_group = lpm->tbl8[i].valid_group,
1010 new_tbl8_entry.next_hop = next_hop;
1012 * Setting tbl8 entry in one go to avoid race
1015 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1026 static __rte_noinline int32_t
1027 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
1030 #define group_idx next_hop
1031 uint32_t tbl24_index;
1032 int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
1035 tbl24_index = (ip_masked >> 8);
1036 tbl8_range = depth_to_range(depth);
1038 if (!lpm->tbl24[tbl24_index].valid) {
1039 /* Search for a free tbl8 group. */
1040 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1042 /* Check tbl8 allocation was successful. */
1043 if (tbl8_group_index < 0) {
1044 return tbl8_group_index;
1047 /* Find index into tbl8 and range. */
1048 tbl8_index = (tbl8_group_index *
1049 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1052 /* Set tbl8 entry. */
1053 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1054 struct rte_lpm_tbl_entry new_tbl8_entry = {
1057 .valid_group = lpm->tbl8[i].valid_group,
1058 .next_hop = next_hop,
1060 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1065 * Update tbl24 entry to point to new tbl8 entry. Note: The
1066 * ext_flag and tbl8_index need to be updated simultaneously,
1067 * so assign whole structure in one go
1070 struct rte_lpm_tbl_entry new_tbl24_entry = {
1071 .group_idx = tbl8_group_index,
1077 /* The tbl24 entry must be written only after the
1078 * tbl8 entries are written.
1080 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1083 } /* If valid entry but not extended calculate the index into Table8. */
1084 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1085 /* Search for free tbl8 group. */
1086 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1088 if (tbl8_group_index < 0) {
1089 return tbl8_group_index;
1092 tbl8_group_start = tbl8_group_index *
1093 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1094 tbl8_group_end = tbl8_group_start +
1095 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1097 /* Populate new tbl8 with tbl24 value. */
1098 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1099 struct rte_lpm_tbl_entry new_tbl8_entry = {
1101 .depth = lpm->tbl24[tbl24_index].depth,
1102 .valid_group = lpm->tbl8[i].valid_group,
1103 .next_hop = lpm->tbl24[tbl24_index].next_hop,
1105 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1109 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1111 /* Insert new rule into the tbl8 entry. */
1112 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1113 struct rte_lpm_tbl_entry new_tbl8_entry = {
1116 .valid_group = lpm->tbl8[i].valid_group,
1117 .next_hop = next_hop,
1119 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1124 * Update tbl24 entry to point to new tbl8 entry. Note: The
1125 * ext_flag and tbl8_index need to be updated simultaneously,
1126 * so assign whole structure in one go.
1129 struct rte_lpm_tbl_entry new_tbl24_entry = {
1130 .group_idx = tbl8_group_index,
1136 /* The tbl24 entry must be written only after the
1137 * tbl8 entries are written.
1139 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1143 	 * If it is a valid, extended entry, calculate the index into tbl8.
1145 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1146 tbl8_group_start = tbl8_group_index *
1147 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1148 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1150 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1152 if (!lpm->tbl8[i].valid ||
1153 lpm->tbl8[i].depth <= depth) {
1154 struct rte_lpm_tbl_entry new_tbl8_entry = {
1157 .next_hop = next_hop,
1158 .valid_group = lpm->tbl8[i].valid_group,
1162 * Setting tbl8 entry in one go to avoid race
1165 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1180 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1183 int32_t rule_index, status = 0;
1186 /* Check user arguments. */
1187 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1190 ip_masked = ip & depth_to_mask(depth);
1192 /* Add the rule to the rule table. */
1193 rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
1195 	/* If there is no space available for the new rule, return an error. */
1196 if (rule_index < 0) {
1200 if (depth <= MAX_DEPTH_TBL24) {
1201 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
1202 	} else { /* If depth > MAX_DEPTH_TBL24 */
1203 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
1206 		 * If the add fails due to exhaustion of tbl8 extensions, delete the
1207 		 * rule that was added to the rule table.
1210 rule_delete_v20(lpm, rule_index, depth);
1218 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1221 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1224 int32_t rule_index, status = 0;
1227 /* Check user arguments. */
1228 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1231 ip_masked = ip & depth_to_mask(depth);
1233 /* Add the rule to the rule table. */
1234 rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
1236 	/* If there is no space available for the new rule, return an error. */
1237 if (rule_index < 0) {
1241 if (depth <= MAX_DEPTH_TBL24) {
1242 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
1243 	} else { /* If depth > MAX_DEPTH_TBL24 */
1244 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
1247 		 * If the add fails due to exhaustion of tbl8 extensions, delete the
1248 		 * rule that was added to the rule table.
1251 rule_delete_v1604(lpm, rule_index, depth);
1259 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1260 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1261 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
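/*
 * Usage sketch (illustrative, not part of the library): adding two routes.
 * RTE_IPV4() is the host-byte-order address helper from rte_ip.h; the
 * addresses and next-hop IDs below are arbitrary example values.
 *
 *	int ret = rte_lpm_add(lpm, RTE_IPV4(10, 0, 0, 0), 24, 1);
 *	if (ret == 0)
 *		ret = rte_lpm_add(lpm, RTE_IPV4(10, 0, 0, 0), 8, 2);
 *	if (ret < 0)
 *		printf("route insertion failed: %d\n", ret);
 */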
1264 * Look for a rule in the high-level rules table
1267 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1273 /* Check user arguments. */
1274 if ((lpm == NULL) ||
1275 (next_hop == NULL) ||
1276 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1279 /* Look for the rule using rule_find. */
1280 ip_masked = ip & depth_to_mask(depth);
1281 rule_index = rule_find_v20(lpm, ip_masked, depth);
1283 if (rule_index >= 0) {
1284 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1288 /* If rule is not found return 0. */
1291 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1294 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1300 /* Check user arguments. */
1301 if ((lpm == NULL) ||
1302 (next_hop == NULL) ||
1303 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1306 /* Look for the rule using rule_find. */
1307 ip_masked = ip & depth_to_mask(depth);
1308 rule_index = rule_find_v1604(lpm, ip_masked, depth);
1310 if (rule_index >= 0) {
1311 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1315 /* If rule is not found return 0. */
1318 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1319 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1320 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
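/*
 * Usage sketch (illustrative): checking whether an exact prefix was
 * installed and fetching its next hop. Example values only.
 *
 *	uint32_t next_hop;
 *	if (rte_lpm_is_rule_present(lpm, RTE_IPV4(10, 0, 0, 0), 24,
 *			&next_hop) == 1)
 *		printf("10.0.0.0/24 -> next hop %u\n", next_hop);
 */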
1323 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1324 uint8_t *sub_rule_depth)
1330 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1331 ip_masked = ip & depth_to_mask(prev_depth);
1333 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1335 if (rule_index >= 0) {
1336 *sub_rule_depth = prev_depth;
1345 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1346 uint8_t *sub_rule_depth)
1352 for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1353 ip_masked = ip & depth_to_mask(prev_depth);
1355 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1357 if (rule_index >= 0) {
1358 *sub_rule_depth = prev_depth;
1367 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1368 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1370 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1372 /* Calculate the range and index into Table24. */
1373 tbl24_range = depth_to_range(depth);
1374 tbl24_index = (ip_masked >> 8);
1377 	 * First check the sub_rule_index. A -1 indicates no replacement rule
1378 	 * and a non-negative number indicates a valid sub_rule_index.
1380 if (sub_rule_index < 0) {
1382 * If no replacement rule exists then invalidate entries
1383 * associated with this rule.
1385 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1387 if (lpm->tbl24[i].valid_group == 0 &&
1388 lpm->tbl24[i].depth <= depth) {
1389 struct rte_lpm_tbl_entry_v20
1390 zero_tbl24_entry = {
1395 zero_tbl24_entry.next_hop = 0;
1396 __atomic_store(&lpm->tbl24[i],
1397 &zero_tbl24_entry, __ATOMIC_RELEASE);
1398 } else if (lpm->tbl24[i].valid_group == 1) {
1400 * If TBL24 entry is extended, then there has
1401 * to be a rule with depth >= 25 in the
1402 * associated TBL8 group.
1405 tbl8_group_index = lpm->tbl24[i].group_idx;
1406 tbl8_index = tbl8_group_index *
1407 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1409 for (j = tbl8_index; j < (tbl8_index +
1410 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1412 if (lpm->tbl8[j].depth <= depth)
1413 lpm->tbl8[j].valid = INVALID;
1419 * If a replacement rule exists then modify entries
1420 * associated with this rule.
1423 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1424 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1427 .depth = sub_rule_depth,
1430 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1432 .valid_group = VALID,
1433 .depth = sub_rule_depth,
1435 new_tbl8_entry.next_hop =
1436 lpm->rules_tbl[sub_rule_index].next_hop;
1438 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1440 if (lpm->tbl24[i].valid_group == 0 &&
1441 lpm->tbl24[i].depth <= depth) {
1442 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
1444 } else if (lpm->tbl24[i].valid_group == 1) {
1446 * If TBL24 entry is extended, then there has
1447 * to be a rule with depth >= 25 in the
1448 * associated TBL8 group.
1451 tbl8_group_index = lpm->tbl24[i].group_idx;
1452 tbl8_index = tbl8_group_index *
1453 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1455 for (j = tbl8_index; j < (tbl8_index +
1456 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1458 if (lpm->tbl8[j].depth <= depth)
1459 __atomic_store(&lpm->tbl8[j],
1471 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1472 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1474 #define group_idx next_hop
1475 uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1477 /* Calculate the range and index into Table24. */
1478 tbl24_range = depth_to_range(depth);
1479 tbl24_index = (ip_masked >> 8);
1480 struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
1483 	 * First check the sub_rule_index. A -1 indicates no replacement rule
1484 	 * and a non-negative number indicates a valid sub_rule_index.
1486 if (sub_rule_index < 0) {
1488 * If no replacement rule exists then invalidate entries
1489 * associated with this rule.
1491 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1493 if (lpm->tbl24[i].valid_group == 0 &&
1494 lpm->tbl24[i].depth <= depth) {
1495 __atomic_store(&lpm->tbl24[i],
1496 &zero_tbl24_entry, __ATOMIC_RELEASE);
1497 } else if (lpm->tbl24[i].valid_group == 1) {
1499 * If TBL24 entry is extended, then there has
1500 * to be a rule with depth >= 25 in the
1501 * associated TBL8 group.
1504 tbl8_group_index = lpm->tbl24[i].group_idx;
1505 tbl8_index = tbl8_group_index *
1506 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1508 for (j = tbl8_index; j < (tbl8_index +
1509 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1511 if (lpm->tbl8[j].depth <= depth)
1512 lpm->tbl8[j].valid = INVALID;
1518 * If a replacement rule exists then modify entries
1519 * associated with this rule.
1522 struct rte_lpm_tbl_entry new_tbl24_entry = {
1523 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1526 .depth = sub_rule_depth,
1529 struct rte_lpm_tbl_entry new_tbl8_entry = {
1531 .valid_group = VALID,
1532 .depth = sub_rule_depth,
1533 .next_hop = lpm->rules_tbl
1534 [sub_rule_index].next_hop,
1537 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1539 if (lpm->tbl24[i].valid_group == 0 &&
1540 lpm->tbl24[i].depth <= depth) {
1541 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
1543 } else if (lpm->tbl24[i].valid_group == 1) {
1545 * If TBL24 entry is extended, then there has
1546 * to be a rule with depth >= 25 in the
1547 * associated TBL8 group.
1550 tbl8_group_index = lpm->tbl24[i].group_idx;
1551 tbl8_index = tbl8_group_index *
1552 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1554 for (j = tbl8_index; j < (tbl8_index +
1555 RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1557 if (lpm->tbl8[j].depth <= depth)
1558 __atomic_store(&lpm->tbl8[j],
1570  * Checks if a tbl8 group can be recycled.
1572  * A return of -EEXIST means the tbl8 is in use and thus cannot be recycled.
1573  * A return of -EINVAL means the tbl8 is empty and thus can be recycled.
1574  * A return value > -1 means the tbl8 is in use but all of its entries hold
1575  * the same value and thus it can be recycled.
1578 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1579 uint32_t tbl8_group_start)
1581 uint32_t tbl8_group_end, i;
1582 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1585 	 * Check the first entry of the given tbl8. If it is invalid we know
1586 	 * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24
1587 	 * (as such a rule would affect all entries in the tbl8) and thus this
1588 	 * table cannot be recycled into a single tbl24 entry.
1590 if (tbl8[tbl8_group_start].valid) {
1592 * If first entry is valid check if the depth is less than 24
1593 * and if so check the rest of the entries to verify that they
1594 * are all of this depth.
1596 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1597 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1600 if (tbl8[i].depth !=
1601 tbl8[tbl8_group_start].depth) {
1606 			/* If all entries are the same return the tbl8 index */
1607 return tbl8_group_start;
1613 * If the first entry is invalid check if the rest of the entries in
1614 * the tbl8 are invalid.
1616 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1620 /* If no valid entries are found then return -EINVAL. */
1625 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1626 uint32_t tbl8_group_start)
1628 uint32_t tbl8_group_end, i;
1629 tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1632 	 * Check the first entry of the given tbl8. If it is invalid we know
1633 	 * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24
1634 	 * (as such a rule would affect all entries in the tbl8) and thus this
1635 	 * table cannot be recycled into a single tbl24 entry.
1637 if (tbl8[tbl8_group_start].valid) {
1639 * If first entry is valid check if the depth is less than 24
1640 * and if so check the rest of the entries to verify that they
1641 * are all of this depth.
1643 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1644 for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1647 if (tbl8[i].depth !=
1648 tbl8[tbl8_group_start].depth) {
1653 			/* If all entries are the same return the tbl8 index */
1654 return tbl8_group_start;
1660 * If the first entry is invalid check if the rest of the entries in
1661 * the tbl8 are invalid.
1663 for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1667 /* If no valid entries are found then return -EINVAL. */
1672 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1673 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1675 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1677 int32_t tbl8_recycle_index;
1680 * Calculate the index into tbl24 and range. Note: All depths larger
1681 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1683 tbl24_index = ip_masked >> 8;
1685 /* Calculate the index into tbl8 and range. */
1686 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1687 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1688 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1689 tbl8_range = depth_to_range(depth);
1691 if (sub_rule_index < 0) {
1693 * Loop through the range of entries on tbl8 for which the
1694 * rule_to_delete must be removed or modified.
1696 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1697 if (lpm->tbl8[i].depth <= depth)
1698 lpm->tbl8[i].valid = INVALID;
1701 /* Set new tbl8 entry. */
1702 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1704 .depth = sub_rule_depth,
1705 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1708 new_tbl8_entry.next_hop =
1709 lpm->rules_tbl[sub_rule_index].next_hop;
1711 * Loop through the range of entries on tbl8 for which the
1712 * rule_to_delete must be modified.
1714 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1715 if (lpm->tbl8[i].depth <= depth)
1716 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1722 * Check if there are any valid entries in this tbl8 group. If all
1723 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1724 * associated tbl24 entry.
1727 tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1729 if (tbl8_recycle_index == -EINVAL) {
1730 /* Set tbl24 before freeing tbl8 to avoid race condition.
1731 * Prevent the free of the tbl8 group from hoisting.
1733 lpm->tbl24[tbl24_index].valid = 0;
1734 __atomic_thread_fence(__ATOMIC_RELEASE);
1735 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1736 } else if (tbl8_recycle_index > -1) {
1737 /* Update tbl24 entry. */
1738 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1739 .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1742 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1745 /* Set tbl24 before freeing tbl8 to avoid race condition.
1746 * Prevent the free of the tbl8 group from hoisting.
1748 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1750 __atomic_thread_fence(__ATOMIC_RELEASE);
1751 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1758 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1759 uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1761 #define group_idx next_hop
1762 uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1764 int32_t tbl8_recycle_index;
1767 * Calculate the index into tbl24 and range. Note: All depths larger
1768 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1770 tbl24_index = ip_masked >> 8;
1772 /* Calculate the index into tbl8 and range. */
1773 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1774 tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1775 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1776 tbl8_range = depth_to_range(depth);
1778 if (sub_rule_index < 0) {
1780 * Loop through the range of entries on tbl8 for which the
1781 * rule_to_delete must be removed or modified.
1783 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1784 if (lpm->tbl8[i].depth <= depth)
1785 lpm->tbl8[i].valid = INVALID;
1788 /* Set new tbl8 entry. */
1789 struct rte_lpm_tbl_entry new_tbl8_entry = {
1791 .depth = sub_rule_depth,
1792 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1793 .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1797 * Loop through the range of entries on tbl8 for which the
1798 * rule_to_delete must be modified.
1800 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1801 if (lpm->tbl8[i].depth <= depth)
1802 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1808 * Check if there are any valid entries in this tbl8 group. If all
1809 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1810 * associated tbl24 entry.
1813 tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1815 if (tbl8_recycle_index == -EINVAL) {
1816 /* Set tbl24 before freeing tbl8 to avoid race condition.
1817 * Prevent the free of the tbl8 group from hoisting.
1819 lpm->tbl24[tbl24_index].valid = 0;
1820 __atomic_thread_fence(__ATOMIC_RELEASE);
1821 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1822 } else if (tbl8_recycle_index > -1) {
1823 /* Update tbl24 entry. */
1824 struct rte_lpm_tbl_entry new_tbl24_entry = {
1825 .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1828 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1831 /* Set tbl24 before freeing tbl8 to avoid race condition.
1832 * Prevent the free of the tbl8 group from hoisting.
1834 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1836 __atomic_thread_fence(__ATOMIC_RELEASE);
1837 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1847 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1849 int32_t rule_to_delete_index, sub_rule_index;
1851 uint8_t sub_rule_depth;
1853 	 * Check input arguments. Note: the IP address is an unsigned 32-bit
1854 	 * integer and therefore need not be range-checked.
1856 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1860 ip_masked = ip & depth_to_mask(depth);
1863 	 * Find the index of the input rule that needs to be deleted in the
1866 rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1869 * Check if rule_to_delete_index was found. If no rule was found the
1870 * function rule_find returns -EINVAL.
1872 if (rule_to_delete_index < 0)
1875 /* Delete the rule from the rule table. */
1876 rule_delete_v20(lpm, rule_to_delete_index, depth);
1879 * Find rule to replace the rule_to_delete. If there is no rule to
1880 * replace the rule_to_delete we return -1 and invalidate the table
1881 * entries associated with this rule.
1884 sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1887 	 * If the input depth value is less than 25 use function
1888 	 * delete_depth_small, otherwise use delete_depth_big.
1890 if (depth <= MAX_DEPTH_TBL24) {
1891 return delete_depth_small_v20(lpm, ip_masked, depth,
1892 sub_rule_index, sub_rule_depth);
1893 } else { /* If depth > MAX_DEPTH_TBL24 */
1894 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1898 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1901 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1903 int32_t rule_to_delete_index, sub_rule_index;
1905 uint8_t sub_rule_depth;
1907 	 * Check input arguments. Note: the IP address is an unsigned 32-bit
1908 	 * integer and therefore need not be range-checked.
1910 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1914 ip_masked = ip & depth_to_mask(depth);
1917 	 * Find the index of the input rule that needs to be deleted in the
1920 rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1923 * Check if rule_to_delete_index was found. If no rule was found the
1924 * function rule_find returns -EINVAL.
1926 if (rule_to_delete_index < 0)
1929 /* Delete the rule from the rule table. */
1930 rule_delete_v1604(lpm, rule_to_delete_index, depth);
1933 * Find rule to replace the rule_to_delete. If there is no rule to
1934 * replace the rule_to_delete we return -1 and invalidate the table
1935 * entries associated with this rule.
1938 sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1941 	 * If the input depth value is less than 25 use function
1942 	 * delete_depth_small, otherwise use delete_depth_big.
1944 if (depth <= MAX_DEPTH_TBL24) {
1945 return delete_depth_small_v1604(lpm, ip_masked, depth,
1946 sub_rule_index, sub_rule_depth);
1947 } else { /* If depth > MAX_DEPTH_TBL24 */
1948 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1952 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1953 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1954 uint8_t depth), rte_lpm_delete_v1604);
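/*
 * Usage sketch (illustrative): removing a single route, then tearing the
 * table down. Example values only.
 *
 *	if (rte_lpm_delete(lpm, RTE_IPV4(10, 0, 0, 0), 24) < 0)
 *		printf("route deletion failed\n");
 *	rte_lpm_delete_all(lpm);
 *	rte_lpm_free(lpm);
 */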
1957 * Delete all rules from the LPM table.
1960 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1962 /* Zero rule information. */
1963 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1966 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1969 memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1971 	/* Delete all rules from the rules table. */
1972 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1974 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1977 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1979 /* Zero rule information. */
1980 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1983 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1986 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1987 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1989 	/* Delete all rules from the rules table. */
1990 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1992 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1993 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1994 rte_lpm_delete_all_v1604);