/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_eal_memconfig.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_tailq.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};
/** @internal Rule structure. */
struct rte_lpm_rule {
	uint32_t ip; /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
};

/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};

/** @internal LPM structure. */
struct __rte_lpm {
	/* Exposed LPM data. */
	struct rte_lpm lpm;

	/* LPM metadata. */
	char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
	uint32_t max_rules; /**< Max. number of rules per lpm. */
	uint32_t number_tbl8s; /**< Number of tbl8s. */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
	struct rte_lpm_rule *rules_tbl; /**< LPM rules. */

	/* RCU config. */
	struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
	enum rte_lpm_qsbr_mode rcu_mode; /* Blocking, defer queue. */
	struct rte_rcu_qsbr_dq *dq; /* RCU QSBR defer queue. */
};
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {				\
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))	\
		rte_panic("LPM: Invalid depth (%u) at line %d",	\
				(unsigned)(depth), __LINE__);	\
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)  : range = 1 - 32
 * mask   (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}
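/*
 * Worked example (added for illustration, not part of the original source):
 *   depth_to_mask(1)  == 0x80000000
 *   depth_to_mask(24) == 0xFFFFFF00
 *   depth_to_mask(32) == 0xFFFFFFFF
 * The arithmetic right shift of (int)0x80000000 sign-extends, filling the
 * upper bits with 1s as the depth grows.
 */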
/*
 * Converts given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
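/*
 * Worked example (added for illustration, not part of the original source):
 *   depth_to_range(16) == 1 << (24 - 16) == 256  tbl24 entries
 *   depth_to_range(24) == 1                      tbl24 entry
 *   depth_to_range(28) == 1 << (32 - 28) == 16   tbl8 entries
 */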
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct __rte_lpm *i_lpm = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		i_lpm = te->data;
		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return &i_lpm->lpm;
}
/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct __rte_lpm *i_lpm;
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	rte_mcfg_tailq_write_lock();

	/* Guarantee there's no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		i_lpm = te->data;
		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*i_lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	i_lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (i_lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	i_lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (i_lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(i_lpm);
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	i_lpm->lpm.tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (i_lpm->lpm.tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(i_lpm->rules_tbl);
		rte_free(i_lpm);
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	i_lpm->max_rules = config->max_rules;
	i_lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(i_lpm->name, name, sizeof(i_lpm->name));

	te->data = i_lpm;
	lpm = &i_lpm->lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_mcfg_tailq_write_unlock();

	return lpm;
}
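/*
 * Illustrative usage sketch (added, not part of the original file):
 * creating a table with room for 1024 rules and 256 tbl8 groups on
 * socket 0, using struct rte_lpm_config from rte_lpm.h.
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", 0, &config);
 *
 *	if (lpm == NULL)
 *		printf("LPM creation failed: %d\n", rte_errno);
 */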
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;
	struct __rte_lpm *i_lpm;

	/* Check user arguments. */
	if (lpm == NULL)
		return;
	i_lpm = container_of(lpm, struct __rte_lpm, lpm);

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *)i_lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();

	if (i_lpm->dq != NULL)
		rte_rcu_qsbr_dq_delete(i_lpm->dq);
	rte_free(i_lpm->lpm.tbl8);
	rte_free(i_lpm->rules_tbl);
	rte_free(i_lpm);
	rte_free(te);
}
static void
__lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
{
	struct rte_lpm_tbl_entry *tbl8 = ((struct __rte_lpm *)p)->lpm.tbl8;
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
	uint32_t tbl8_group_index = *(uint32_t *)data;

	RTE_SET_USED(n);
	/* Set tbl8 group invalid. */
	__atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
		__ATOMIC_RELAXED);
}
/* Associate QSBR variable with an LPM object.
 */
int
rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
{
	struct rte_rcu_qsbr_dq_parameters params = {0};
	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
	struct __rte_lpm *i_lpm;

	if (lpm == NULL || cfg == NULL) {
		rte_errno = EINVAL;
		return 1;
	}

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	if (i_lpm->v != NULL) {
		rte_errno = EEXIST;
		return 1;
	}

	if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
		/* No other things to do. */
	} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Init QSBR defer queue. */
		snprintf(rcu_dq_name, sizeof(rcu_dq_name),
				"LPM_RCU_%s", i_lpm->name);
		params.name = rcu_dq_name;
		params.size = cfg->dq_size;
		if (params.size == 0)
			params.size = i_lpm->number_tbl8s;
		params.trigger_reclaim_limit = cfg->reclaim_thd;
		params.max_reclaim_size = cfg->reclaim_max;
		if (params.max_reclaim_size == 0)
			params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
		params.esize = sizeof(uint32_t);	/* tbl8 group index */
		params.free_fn = __lpm_rcu_qsbr_free_resource;
		params.p = i_lpm;
		params.v = cfg->v;
		i_lpm->dq = rte_rcu_qsbr_dq_create(&params);
		if (i_lpm->dq == NULL) {
			RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
			return 1;
		}
	} else {
		rte_errno = EINVAL;
		return 1;
	}
	i_lpm->rcu_mode = cfg->mode;
	i_lpm->v = cfg->v;

	return 0;
}
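/*
 * Illustrative usage sketch (added, not part of the original file):
 * enabling RCU-based reclamation of tbl8 groups with a defer queue. The
 * QSBR variable "qsv" is assumed to have been allocated and initialised by
 * the application with rte_rcu_qsbr_init().
 *
 *	struct rte_lpm_rcu_config rcu_cfg = {
 *		.v = qsv,
 *		.mode = RTE_LPM_QSBR_MODE_DQ,
 *	};
 *
 *	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
 *		RTE_LOG(ERR, LPM, "RCU enable failed\n");
 */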
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
 * as the group index because, although the depth range is 1 - 32, groups are
 * indexed from 0 - 31 in the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_add(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (i_lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (i_lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (i_lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				i_lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == i_lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (i_lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = i_lpm->rule_info[i - 1].first_rule
						+ i_lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == i_lpm->max_rules)
			return -ENOSPC;

		i_lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (i_lpm->rule_info[i - 1].first_rule
				+ i_lpm->rule_info[i - 1].used_rules == i_lpm->max_rules)
			return -ENOSPC;

		if (i_lpm->rule_info[i - 1].used_rules > 0) {
			i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule
				+ i_lpm->rule_info[i - 1].used_rules]
					= i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule];
			i_lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	i_lpm->rules_tbl[rule_index].ip = ip_masked;
	i_lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	i_lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
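/*
 * Worked example (added, not from the original source): with one /8 rule
 * and two /16 rules already installed, rules_tbl is laid out as
 *
 *	index 0    : the /8 rule    (rule_info[7]:  first_rule = 0, used_rules = 1)
 *	index 1, 2 : the /16 rules  (rule_info[15]: first_rule = 1, used_rules = 2)
 *
 * Adding a second /8 rule copies rules_tbl[1] to rules_tbl[3] and bumps
 * rule_info[15].first_rule to 2, so each depth group stays contiguous and
 * the new /8 rule lands in the freed slot at index 1.
 */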
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct __rte_lpm *i_lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	i_lpm->rules_tbl[rule_index] =
			i_lpm->rules_tbl[i_lpm->rule_info[depth - 1].first_rule
			+ i_lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (i_lpm->rule_info[i].used_rules > 0) {
			i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule - 1] =
					i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule
						+ i_lpm->rule_info[i].used_rules - 1];
			i_lpm->rule_info[i].first_rule--;
		}
	}

	i_lpm->rule_info[depth - 1].used_rules--;
}
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (i_lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
/*
 * Find, clean and allocate a tbl8.
 */
static int32_t
_tbl8_alloc(struct __rte_lpm *i_lpm)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < i_lpm->number_tbl8s; group_idx++) {
		tbl8_entry = &i_lpm->lpm.tbl8[group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.next_hop = 0,
				.valid = INVALID,
				.depth = 0,
				.valid_group = VALID,
			};

			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			__atomic_store(tbl8_entry, &new_tbl8_entry,
					__ATOMIC_RELAXED);

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}
static int32_t
tbl8_alloc(struct __rte_lpm *i_lpm)
{
	int32_t group_idx; /* tbl8 group index. */

	group_idx = _tbl8_alloc(i_lpm);
	if (group_idx == -ENOSPC && i_lpm->dq != NULL) {
		/* If there are no tbl8 groups try to reclaim one. */
		if (rte_rcu_qsbr_dq_reclaim(i_lpm->dq, 1,
				NULL, NULL, NULL) == 0)
			group_idx = _tbl8_alloc(i_lpm);
	}

	return group_idx;
}
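/*
 * Illustrative note (added): each tbl8 group is a block of
 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES (256) consecutive tbl8 entries, so group
 * index g covers entries g * 256 .. g * 256 + 255. When _tbl8_alloc()
 * reports -ENOSPC and RCU runs in defer-queue mode, the single reclaim
 * attempt above may release a group that readers no longer reference.
 */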
static int32_t
tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
{
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
	int status;

	if (i_lpm->v == NULL) {
		/* Set tbl8 group invalid. */
		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
		/* Wait for quiescent state change. */
		rte_rcu_qsbr_synchronize(i_lpm->v,
			RTE_QSBR_THRID_INVALID);
		/* Set tbl8 group invalid. */
		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Push into QSBR defer queue. */
		status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
				(void *)&tbl8_group_start);
		if (status == 1) {
			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
			return -rte_errno;	/* Error code from enqueue */
		}
	}

	return 0;
}
static __rte_noinline int32_t
add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
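	/*
	 * Note (added for clarity): group_idx and next_hop share the same
	 * 24-bit field in struct rte_lpm_tbl_entry; the alias above lets the
	 * initialisers below name that field either way, depending on whether
	 * the entry stores a next hop directly or points to a tbl8 group.
	 */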
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/* For invalid OR valid and non-extended tbl24 entries set the
		 * entry.
		 */
		if (!i_lpm->lpm.tbl24[i].valid || (i_lpm->lpm.tbl24[i].valid_group == 0 &&
				i_lpm->lpm.tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (i_lpm->lpm.tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = i_lpm->lpm.tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!i_lpm->lpm.tbl8[j].valid ||
						i_lpm->lpm.tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/* Setting tbl8 entry in one go to avoid
					 * race conditions.
					 */
					__atomic_store(&i_lpm->lpm.tbl8[j],
						&new_tbl8_entry,
						__ATOMIC_RELAXED);

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}
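/*
 * Worked example (added, not from the original source): adding
 * 192.168.0.0/16 with next hop 5 gives tbl24_index = 0xC0A800 and
 * tbl24_range = 1 << (24 - 16) = 256, so tbl24 entries 0xC0A800..0xC0A8FF
 * are set to depth 16 / next hop 5 unless they already hold a deeper route
 * or point to a tbl8 group (in which case the matching tbl8 entries are
 * updated instead).
 */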
static __rte_noinline int32_t
add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!i_lpm->lpm.tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(i_lpm);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(i_lpm);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = i_lpm->lpm.tbl24[tbl24_index].depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} else { /*
		 * If the entry is valid and extended calculate the index into tbl8.
		 */
		tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!i_lpm->lpm.tbl8[i].valid ||
					i_lpm->lpm.tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				};

				/* Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
			}
		}
	}
#undef group_idx
	return 0;
}
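/*
 * Worked example (added, not from the original source): adding 10.0.0.0/28
 * allocates (or reuses) a tbl8 group for the 10.0.0.0/24 block. Within the
 * group, the start offset is (ip_masked & 0xFF) = 0 and tbl8_range =
 * 1 << (32 - 28) = 16, so 16 consecutive tbl8 entries receive the new depth
 * and next hop, and the tbl24 entry for 10.0.0.x is written last with
 * valid_group = 1 and group_idx pointing at the allocated group.
 */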
/*
 * Add a rule to the routing table.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
	uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	struct __rte_lpm *i_lpm;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(i_lpm, ip_masked, depth, next_hop);

	/* Skip table entries update if the rule is the same as
	 * the rule in the rules table.
	 */
	if (rule_index == -EEXIST)
		return 0;

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(i_lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(i_lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete(i_lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
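/*
 * Illustrative usage sketch (added, not part of the original file):
 * installing a route and looking it up. RTE_IPV4() comes from rte_ip.h,
 * rte_lpm_lookup() from rte_lpm.h.
 *
 *	uint32_t nh;
 *
 *	if (rte_lpm_add(lpm, RTE_IPV4(10, 1, 0, 0), 16, 7) < 0)
 *		RTE_LOG(ERR, LPM, "route add failed\n");
 *	if (rte_lpm_lookup(lpm, RTE_IPV4(10, 1, 2, 3), &nh) == 0)
 *		printf("next hop %u\n", nh); // prints 7, the /16 route matched
 */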
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	struct __rte_lpm *i_lpm;
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(i_lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = i_lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
static int32_t
find_previous_rule(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(i_lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}
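/*
 * Illustrative note (added): when 10.1.0.0/16 is deleted and a covering
 * 10.0.0.0/8 rule exists, find_previous_rule() returns the index of that
 * /8 rule and sets *sub_rule_depth to 8, so the vacated tbl24/tbl8 entries
 * are rewritten with the /8 next hop instead of being invalidated.
 */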
static int32_t
delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule,
	 * while a value >= 0 indicates the index of a replacement rule.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
					i_lpm->lpm.tbl24[i].depth <= depth) {
				__atomic_store(&i_lpm->lpm.tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (i_lpm->lpm.tbl8[j].depth <= depth)
						i_lpm->lpm.tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = i_lpm->rules_tbl
			[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
					i_lpm->lpm.tbl24[i].depth <= depth) {
				__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (i_lpm->lpm.tbl8[j].depth <= depth)
						__atomic_store(&i_lpm->lpm.tbl8[j],
							&new_tbl8_entry, __ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}
/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}

	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}

	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
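/*
 * Illustrative note (added): if every entry in the group ends up carrying
 * the same depth <= 24 route, e.g. after the only /28 that forced the group
 * to exist has been deleted, the caller can fold the group back into a
 * single tbl24 entry; the returned index is used to copy that entry's depth
 * and next hop into tbl24 before the group is freed.
 */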
static int32_t
delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index, status = 0;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (i_lpm->lpm.tbl8[i].depth <= depth)
				i_lpm->lpm.tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = i_lpm->lpm.tbl8[tbl8_group_start].valid_group,
			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (i_lpm->lpm.tbl8[i].depth <= depth)
				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check(i_lpm->lpm.tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		i_lpm->lpm.tbl24[tbl24_index].valid = 0;
		__atomic_thread_fence(__ATOMIC_RELEASE);
		status = tbl8_free(i_lpm, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = i_lpm->lpm.tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = i_lpm->lpm.tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_RELEASE);
		status = tbl8_free(i_lpm, tbl8_group_start);
	}
#undef group_idx
	return status;
}
/*
 * Deletes a rule
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	struct __rte_lpm *i_lpm;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;

	/*
	 * Check input arguments. Note: IP is an unsigned 32-bit integer,
	 * so its value need not be range checked here.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(i_lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete(i_lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule(i_lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(i_lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(i_lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	struct __rte_lpm *i_lpm;

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);

	/* Zero rule information. */
	memset(i_lpm->rule_info, 0, sizeof(i_lpm->rule_info));

	/* Zero tbl24. */
	memset(i_lpm->lpm.tbl24, 0, sizeof(i_lpm->lpm.tbl24));

	/* Zero tbl8. */
	memset(i_lpm->lpm.tbl8, 0, sizeof(i_lpm->lpm.tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * i_lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(i_lpm->rules_tbl, 0, sizeof(i_lpm->rules_tbl[0]) * i_lpm->max_rules);
}