/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/** @internal LPM structure. */
struct __rte_lpm {
	/* LPM metadata. */
	struct rte_lpm lpm;

	/* RCU config. */
	struct rte_rcu_qsbr *v;		/* RCU QSBR variable. */
	enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
	struct rte_rcu_qsbr_dq *dq;	/* RCU QSBR defer queue. */
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)	: range = 1 - 32
 * mask   (OUT)	: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}

/*
 * Converts a given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

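/*
 * Worked example (illustrative only): depth_to_mask(24) == 0xFFFFFF00 and
 * depth_to_range(24) == 1, i.e. a /24 prefix owns exactly one tbl24 entry;
 * depth_to_mask(22) == 0xFFFFFC00 and depth_to_range(22) == 4 tbl24 entries;
 * for a /30 prefix (resolved inside a tbl8 group), depth_to_range(30) == 4
 * tbl8 entries.
 */
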
/*
 * Find an existing LPM table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		l = te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}

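/*
 * Illustrative sketch (not part of the library): a typical caller probes for
 * an existing table by name before creating a new one.  The table name
 * "example_lpm" and the function name are hypothetical.
 */
static __rte_unused struct rte_lpm *
example_find_table(void)
{
	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");

	if (lpm == NULL && rte_errno == ENOENT) {
		/* No table with this name has been created yet. */
		return NULL;
	}

	return lpm;
}
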
/*
 * Allocates memory for an LPM object.
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct __rte_lpm *internal_lpm;
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	rte_mcfg_tailq_write_lock();

	/* Guarantee there's no existing table with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		lpm = NULL;
		rte_errno = EEXIST;
		goto exit;
	}

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*internal_lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;

	/* Allocate tailq entry. */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	internal_lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (internal_lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm = &internal_lpm->lpm;
	lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(internal_lpm);
		internal_lpm = NULL;
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(internal_lpm);
		internal_lpm = NULL;
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	te->data = lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_mcfg_tailq_write_unlock();

	return lpm;
}

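/*
 * Illustrative sketch (not part of the library): creating a table sized for
 * 1024 rules and 256 tbl8 groups.  The name, the sizing values and the
 * function name are hypothetical choices for the example.
 */
static __rte_unused struct rte_lpm *
example_create_table(void)
{
	struct rte_lpm *lpm;
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0,
	};

	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
	if (lpm == NULL)
		RTE_LOG(ERR, LPM, "Example LPM creation failed\n");

	return lpm;
}
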
/*
 * Deallocates memory for the given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
	struct __rte_lpm *internal_lpm;
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_write_lock();

	/* Find our tailq entry. */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *)lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();

	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
	if (internal_lpm->dq != NULL)
		rte_rcu_qsbr_dq_delete(internal_lpm->dq);
	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(internal_lpm);
	rte_free(te);
}

static void
__lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
{
	struct rte_lpm_tbl_entry *tbl8 = ((struct rte_lpm *)p)->tbl8;
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
	uint32_t tbl8_group_index = *(uint32_t *)data;

	RTE_SET_USED(n);
	/* Set tbl8 group invalid. */
	__atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
		__ATOMIC_RELAXED);
}

/* Associate a QSBR variable with an LPM object.
 */
int
rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
{
	struct rte_rcu_qsbr_dq_parameters params = {0};
	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
	struct __rte_lpm *internal_lpm;

	if (lpm == NULL || cfg == NULL) {
		rte_errno = EINVAL;
		return 1;
	}

	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
	if (internal_lpm->v != NULL) {
		rte_errno = EEXIST;
		return 1;
	}

	if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
		/* No other things to do. */
	} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Init QSBR defer queue. */
		snprintf(rcu_dq_name, sizeof(rcu_dq_name),
				"LPM_RCU_%s", lpm->name);
		params.name = rcu_dq_name;
		params.size = cfg->dq_size;
		if (params.size == 0)
			params.size = lpm->number_tbl8s;
		params.trigger_reclaim_limit = cfg->reclaim_thd;
		params.max_reclaim_size = cfg->reclaim_max;
		if (params.max_reclaim_size == 0)
			params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
		params.esize = sizeof(uint32_t);	/* tbl8 group index */
		params.free_fn = __lpm_rcu_qsbr_free_resource;
		params.p = lpm;
		params.v = cfg->v;
		internal_lpm->dq = rte_rcu_qsbr_dq_create(&params);
		if (internal_lpm->dq == NULL) {
			RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
			return 1;
		}
	} else {
		rte_errno = EINVAL;
		return 1;
	}
	internal_lpm->rcu_mode = cfg->mode;
	internal_lpm->v = cfg->v;

	return 0;
}

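/*
 * Illustrative sketch (not part of the library): allocating a QSBR variable
 * and attaching it so that tbl8 groups are reclaimed through the defer
 * queue.  The reader-thread count (4) and the function name are hypothetical
 * example values.
 */
static __rte_unused int
example_attach_rcu(struct rte_lpm *lpm)
{
	struct rte_lpm_rcu_config rcu_cfg = {0};
	struct rte_rcu_qsbr *v;
	size_t sz;
	int ret;

	/* Allocate and initialise a QSBR variable for up to 4 reader threads. */
	sz = rte_rcu_qsbr_get_memsize(4);
	v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (v == NULL)
		return -ENOMEM;
	rte_rcu_qsbr_init(v, 4);

	rcu_cfg.v = v;
	rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;	/* reclaim via defer queue */

	ret = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
	if (ret != 0)
		rte_free(v);

	return ret;
}
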
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 * (A short worked example of this layout follows the function body.)
 */
static int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

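/*
 * Worked example of the rule-table layout (illustrative only): suppose the
 * table already holds one /8 rule and two /24 rules.  Then
 * rule_info[7]  = { .first_rule = 0, .used_rules = 1 } and
 * rule_info[23] = { .first_rule = 1, .used_rules = 2 }.  Adding a /16 rule
 * shifts the /24 group up by one slot, so the new rule is stored at index 1,
 * rule_info[15] becomes { .first_rule = 1, .used_rules = 1 } and
 * rule_info[23].first_rule becomes 2.
 */
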
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

/*
 * Find, clean and allocate a tbl8.
 */
static int32_t
_tbl8_alloc(struct rte_lpm *lpm)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < lpm->number_tbl8s; group_idx++) {
		tbl8_entry = &lpm->tbl8[group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.next_hop = 0,
				.valid = INVALID,
				.depth = 0,
				.valid_group = VALID,
			};

			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			__atomic_store(tbl8_entry, &new_tbl8_entry,
					__ATOMIC_RELAXED);

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no free tbl8 groups then return an error. */
	return -ENOSPC;
}

static int32_t
tbl8_alloc(struct rte_lpm *lpm)
{
	int32_t group_idx; /* tbl8 group index. */
	struct __rte_lpm *internal_lpm;

	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
	group_idx = _tbl8_alloc(lpm);
	if (group_idx == -ENOSPC && internal_lpm->dq != NULL) {
		/* If there are no free tbl8 groups try to reclaim one. */
		if (rte_rcu_qsbr_dq_reclaim(internal_lpm->dq, 1,
				NULL, NULL, NULL) == 0)
			group_idx = _tbl8_alloc(lpm);
	}

	return group_idx;
}

static int32_t
tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
{
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
	struct __rte_lpm *internal_lpm;
	int status;

	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
	if (internal_lpm->v == NULL) {
		/* Set tbl8 group invalid. */
		__atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
		/* Wait for quiescent state change. */
		rte_rcu_qsbr_synchronize(internal_lpm->v,
				RTE_QSBR_THRID_INVALID);
		/* Set tbl8 group invalid. */
		__atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Push into QSBR defer queue. */
		status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
				(void *)&tbl8_group_start);
		if (status == 1) {
			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
			return -rte_errno;
		}
	}

	return 0;
}

static __rte_noinline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set
		 * the entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions.
					 */
					__atomic_store(&lpm->tbl8[j],
						&new_tbl8_entry,
						__ATOMIC_RELAXED);

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

static __rte_noinline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = lpm->tbl24[tbl24_index].depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = lpm->tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
				.group_idx = tbl8_group_index,
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} else { /*
		 * If it is a valid, extended entry calculate the index into tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Add a rule to the table.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
	uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* Skip the table entries update if the rule is the same as
	 * the rule already in the rules table.
	 */
	if (rule_index == -EEXIST)
		return 0;

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);

		/*
		 * If the add fails due to exhaustion of tbl8 extensions delete
		 * the rule that was added to the rule table.
		 */
		if (status < 0) {
			rule_delete(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}

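/*
 * Illustrative sketch (not part of the library): installing a /24 route and
 * resolving a host inside it with rte_lpm_lookup().  The prefix, next-hop id
 * and probe address are hypothetical example values.
 */
static __rte_unused int
example_add_and_lookup(struct rte_lpm *lpm)
{
	uint32_t next_hop;
	int ret;

	/* 192.0.2.0/24 -> next hop id 7 (addresses in host byte order). */
	ret = rte_lpm_add(lpm, 0xC0000200, 24, 7);
	if (ret != 0)
		return ret;

	/* 192.0.2.42 matches the /24 rule and resolves to next hop 7. */
	ret = rte_lpm_lookup(lpm, 0xC000022A, &next_hop);
	if (ret == 0 && next_hop == 7)
		return 0;

	return -ENOENT;
}
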
/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}

static int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

static int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl
			[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						__atomic_store(&lpm->tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index. */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index, status = 0;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		lpm->tbl24[tbl24_index].valid = 0;
		__atomic_thread_fence(__ATOMIC_RELEASE);
		status = tbl8_free(lpm, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_RELEASE);
		status = tbl8_free(lpm, tbl8_group_start);
	}
#undef group_idx
	return status;
}

/*
 * Deletes a rule.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length, therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
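
/*
 * Illustrative sketch (not part of the library): tearing a table down.
 * Flushing the rules first is optional; rte_lpm_free() releases the data
 * structures and the tailq entry either way.  The function name is
 * hypothetical.
 */
static __rte_unused void
example_destroy_table(struct rte_lpm *lpm)
{
	if (lpm == NULL)
		return;

	/* Remove every rule, then release the table itself. */
	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);
}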