/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)
#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};
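/*
 * Background on the constant above (illustrative summary of the DIR-24-8
 * scheme used here): tbl24 is indexed by the 24 most significant bits of an
 * address, so a prefix of depth <= 24 expands into 2^(24 - depth) consecutive
 * tbl24 entries. A deeper prefix extends one tbl24 entry with a 256-entry
 * tbl8 group indexed by the last byte. For example, 10.1.0.0/16 fills 256
 * tbl24 entries, while 10.1.1.0/28 occupies 16 entries of one tbl8 group.
 */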
/** @internal Rule structure. */
struct rte_lpm_rule {
	uint32_t ip; /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
};
/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};
/** @internal LPM structure. */
struct __rte_lpm {
	/* Exposed LPM data. */
	struct rte_lpm lpm;

	/* LPM metadata. */
	char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
	uint32_t max_rules; /**< Max. balanced rules per lpm. */
	uint32_t number_tbl8s; /**< Number of tbl8s. */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
	struct rte_lpm_rule *rules_tbl; /**< LPM rules. */

	/* RCU config. */
	struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
	enum rte_lpm_qsbr_mode rcu_mode; /* Blocking, defer queue. */
	struct rte_rcu_qsbr_dq *dq; /* RCU QSBR defer queue. */
};
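/*
 * Note on the layout above: the public struct rte_lpm is the first member of
 * this private wrapper, so the API entry points below recover the wrapper
 * from a user-supplied handle with
 *
 *	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
 *
 * and hand &i_lpm->lpm back to the user.
 */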
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do { \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__); \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)  : range = 1 - 32
 * mask   (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}
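/*
 * Worked examples (assuming the usual arithmetic right shift on signed
 * values, which the cast above relies on):
 *	depth_to_mask(1)  == 0x80000000  (128.0.0.0)
 *	depth_to_mask(24) == 0xFFFFFF00  (255.255.255.0)
 *	depth_to_mask(32) == 0xFFFFFFFF  (255.255.255.255)
 */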
/*
 * Converts given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
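/*
 * Worked examples: a /16 covers depth_to_range(16) == 256 tbl24 entries, a
 * /24 covers exactly one, and a /28 covers depth_to_range(28) == 16 entries
 * of its tbl8 group.
 */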
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct __rte_lpm *i_lpm = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		i_lpm = te->data;
		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return &i_lpm->lpm;
}
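/*
 * Usage sketch (illustrative; the table name "my_lpm" is hypothetical):
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("my_lpm");
 *	if (lpm == NULL && rte_errno == ENOENT)
 *		;	// no table with that name has been created
 */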
/*
 * Allocates memory for LPM object.
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct __rte_lpm *i_lpm;
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
	rte_mcfg_tailq_write_lock();

	/* Make sure a table with this name does not already exist. */
	TAILQ_FOREACH(te, lpm_list, next) {
		i_lpm = te->data;
		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}
	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*i_lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
	/* Allocate tailq entry. */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Allocate memory to store the LPM data structures. */
	i_lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (i_lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}
	i_lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (i_lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(i_lpm);
		i_lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}
	i_lpm->lpm.tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (i_lpm->lpm.tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(i_lpm->rules_tbl);
		rte_free(i_lpm);
		i_lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Save user arguments. */
	i_lpm->max_rules = config->max_rules;
	i_lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(i_lpm->name, name, sizeof(i_lpm->name));

	te->data = i_lpm;
	lpm = &i_lpm->lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_mcfg_tailq_write_unlock();

	return lpm;
}
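/*
 * Usage sketch (illustrative values; rte_lpm_config is declared in
 * rte_lpm.h):
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("my_lpm", SOCKET_ID_ANY, &config);
 *	if (lpm == NULL)
 *		;	// inspect rte_errno (EINVAL, EEXIST or ENOMEM)
 */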
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;
	struct __rte_lpm *i_lpm;

	/* Check user arguments. */
	if (lpm == NULL)
		return;
	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_write_lock();

	/* Find our tailq entry. */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *)i_lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();
	if (i_lpm->dq != NULL)
		rte_rcu_qsbr_dq_delete(i_lpm->dq);
	rte_free(i_lpm->lpm.tbl8);
	rte_free(i_lpm->rules_tbl);
	rte_free(i_lpm);
	rte_free(te);
}
static void
__lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
{
	struct rte_lpm_tbl_entry *tbl8 = ((struct __rte_lpm *)p)->lpm.tbl8;
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
	uint32_t tbl8_group_index = *(uint32_t *)data;

	RTE_SET_USED(n);
	/* Set tbl8 group invalid. */
	__atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
			__ATOMIC_RELAXED);
}
/* Associate QSBR variable with an LPM object.
 */
int
rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
{
	struct rte_rcu_qsbr_dq_parameters params = {0};
	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
	struct __rte_lpm *i_lpm;

	if (lpm == NULL || cfg == NULL) {
		rte_errno = EINVAL;
		return 1;
	}

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	if (i_lpm->v != NULL) {
		rte_errno = EEXIST;
		return 1;
	}
	if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
		/* Nothing else to do. */
	} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Init QSBR defer queue. */
		snprintf(rcu_dq_name, sizeof(rcu_dq_name),
				"LPM_RCU_%s", i_lpm->name);
		params.name = rcu_dq_name;
		params.size = cfg->dq_size;
		if (params.size == 0)
			params.size = i_lpm->number_tbl8s;
		params.trigger_reclaim_limit = cfg->reclaim_thd;
		params.max_reclaim_size = cfg->reclaim_max;
		if (params.max_reclaim_size == 0)
			params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
		params.esize = sizeof(uint32_t); /* tbl8 group index */
		params.free_fn = __lpm_rcu_qsbr_free_resource;
		params.p = i_lpm;
		params.v = cfg->v;
		i_lpm->dq = rte_rcu_qsbr_dq_create(&params);
		if (i_lpm->dq == NULL) {
			RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
			return 1;
		}
	} else {
		rte_errno = EINVAL;
		return 1;
	}
	i_lpm->rcu_mode = cfg->mode;
	i_lpm->v = cfg->v;

	return 0;
}
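/*
 * Usage sketch (illustrative): attach an initialised QSBR variable so that
 * freed tbl8 groups are reclaimed through the defer queue instead of
 * synchronously; "qsbr" is assumed to have been sized and initialised with
 * rte_rcu_qsbr_init() beforehand.
 *
 *	struct rte_lpm_rcu_config rcu_cfg = {
 *		.v = qsbr,
 *		.mode = RTE_LPM_QSBR_MODE_DQ,
 *	};
 *	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
 *		;	// inspect rte_errno
 */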
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules
 * that apply to a specific prefix depth (i.e. group 1 contains rules that
 * apply to prefixes with a depth of 1, etc.). Although the valid depth range
 * is 1 - 32, the groups are stored at indexes 0 - 31, so (depth - 1) is used
 * below to index the group for a given depth.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
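/*
 * Illustration of the layout (hypothetical state): with two /8 rules and one
 * /24 rule in the table,
 *
 *	rule_info[7]  = { .used_rules = 2, .first_rule = 0 }
 *	rule_info[23] = { .used_rules = 1, .first_rule = 2 }
 *
 * so rules_tbl[0..1] holds the /8 rules and rules_tbl[2] the /24 rule;
 * groups are kept contiguous and ordered by increasing depth.
 */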
static int32_t
rule_add(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (i_lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (i_lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (i_lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				i_lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}
		if (rule_index == i_lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (i_lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = i_lpm->rule_info[i - 1].first_rule
						+ i_lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == i_lpm->max_rules)
			return -ENOSPC;

		i_lpm->rule_info[depth - 1].first_rule = rule_index;
	}
	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (i_lpm->rule_info[i - 1].first_rule
				+ i_lpm->rule_info[i - 1].used_rules == i_lpm->max_rules)
			return -ENOSPC;

		if (i_lpm->rule_info[i - 1].used_rules > 0) {
			i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule
				+ i_lpm->rule_info[i - 1].used_rules]
					= i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule];
			i_lpm->rule_info[i - 1].first_rule++;
		}
	}
	/* Add the new rule. */
	i_lpm->rules_tbl[rule_index].ip = ip_masked;
	i_lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	i_lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct __rte_lpm *i_lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	i_lpm->rules_tbl[rule_index] =
			i_lpm->rules_tbl[i_lpm->rule_info[depth - 1].first_rule
			+ i_lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (i_lpm->rule_info[i].used_rules > 0) {
			i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule - 1] =
					i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule
						+ i_lpm->rule_info[i].used_rules - 1];
			i_lpm->rule_info[i].first_rule--;
		}
	}

	i_lpm->rule_info[depth - 1].used_rules--;
}
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (i_lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
/*
 * Find, clean and allocate a tbl8.
 */
static int32_t
_tbl8_alloc(struct __rte_lpm *i_lpm)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < i_lpm->number_tbl8s; group_idx++) {
		tbl8_entry = &i_lpm->lpm.tbl8[group_idx *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.next_hop = 0,
				.valid = INVALID,
				.depth = 0,
				.valid_group = VALID,
			};

			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			__atomic_store(tbl8_entry, &new_tbl8_entry,
					__ATOMIC_RELAXED);

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}
static int32_t
tbl8_alloc(struct __rte_lpm *i_lpm)
{
	int32_t group_idx; /* tbl8 group index. */

	group_idx = _tbl8_alloc(i_lpm);
	if (group_idx == -ENOSPC && i_lpm->dq != NULL) {
		/* If there are no tbl8 groups try to reclaim one. */
		if (rte_rcu_qsbr_dq_reclaim(i_lpm->dq, 1,
				NULL, NULL, NULL) == 0)
			group_idx = _tbl8_alloc(i_lpm);
	}

	return group_idx;
}
static int32_t
tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
{
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
	int status;

	if (i_lpm->v == NULL) {
		/* Set tbl8 group invalid. */
		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
		/* Wait for quiescent state change. */
		rte_rcu_qsbr_synchronize(i_lpm->v,
				RTE_QSBR_THRID_INVALID);
		/* Set tbl8 group invalid. */
		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Push into QSBR defer queue. */
		status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
				(void *)&tbl8_group_start);
		if (status == 1) {
			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
			return -rte_errno; /* Error code returned. */
		}
	}

	return 0;
}
static __rte_noinline int32_t
add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set
		 * entry.
		 */
		if (!i_lpm->lpm.tbl24[i].valid || (i_lpm->lpm.tbl24[i].valid_group == 0 &&
				i_lpm->lpm.tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (i_lpm->lpm.tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = i_lpm->lpm.tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!i_lpm->lpm.tbl8[j].valid ||
						i_lpm->lpm.tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions.
					 */
					__atomic_store(&i_lpm->lpm.tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}
static __rte_noinline int32_t
add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
			tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!i_lpm->lpm.tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(i_lpm);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(i_lpm);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = i_lpm->lpm.tbl24[tbl24_index].depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);
	} else {
		/*
		 * Else, the entry is valid and extended: calculate the index
		 * into tbl8.
		 */
		tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!i_lpm->lpm.tbl8[i].valid ||
					i_lpm->lpm.tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
			}
		}
	}
#undef group_idx
	return 0;
}
/*
 * Add a rule to the table.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	struct __rte_lpm *i_lpm;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(i_lpm, ip_masked, depth, next_hop);

	/* Skip table entries update if the rule is the same as
	 * the one already in the rules table.
	 */
	if (rule_index == -EEXIST)
		return 0;

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(i_lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(i_lpm, ip_masked, depth, next_hop);

		/*
		 * If the add fails due to exhaustion of tbl8 extensions,
		 * delete the rule that was added to the rule table.
		 */
		if (status < 0) {
			rule_delete(i_lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
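/*
 * Usage sketch (illustrative; RTE_IPV4() comes from rte_ip.h, which the
 * caller is assumed to include):
 *
 *	uint32_t nh;
 *	rte_lpm_add(lpm, RTE_IPV4(192, 168, 1, 0), 24, 100);
 *	if (rte_lpm_lookup(lpm, RTE_IPV4(192, 168, 1, 50), &nh) == 0)
 *		;	// nh == 100, the longest match is 192.168.1.0/24
 */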
/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	struct __rte_lpm *i_lpm;
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
			(next_hop == NULL) ||
			(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(i_lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = i_lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
static int32_t
find_previous_rule(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(i_lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}
static int32_t
delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * while a non-negative value is the index of the replacement rule.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
					i_lpm->lpm.tbl24[i].depth <= depth) {
				__atomic_store(&i_lpm->lpm.tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (i_lpm->lpm.tbl8[j].depth <= depth)
						i_lpm->lpm.tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = i_lpm->rules_tbl
					[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
					i_lpm->lpm.tbl24[i].depth <= depth) {
				__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (i_lpm->lpm.tbl8[j].depth <= depth)
						__atomic_store(&i_lpm->lpm.tbl8[j],
								&new_tbl8_entry,
								__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}
/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means the tbl8 is in use and can not be recycled.
 * Return of -EINVAL means the tbl8 is empty and can be recycled.
 * Return of a value >= 0 means the tbl8 is in use but holds entries of a
 * single depth (<= 24) only, so it can be recycled; the value is the group's
 * first index.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if its depth is 24 or
		 * less and, if so, check the rest of the entries to verify
		 * that they are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth)
					return -EEXIST;
			}
			/* If all entries are the same return the tbl8 index. */
			return tbl8_group_start;
		}

		return -EEXIST;
	}

	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
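/*
 * Illustration (hypothetical state): a tbl8 group that held 192.168.1.0/24
 * entries plus one 192.168.1.64/26 entry is checked after the /26 has been
 * deleted. All 256 entries then carry the same depth (24), so the group's
 * start index is returned and the caller collapses the group back into a
 * single non-extended tbl24 entry.
 */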
static int32_t
delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index, status = 0;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);
	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (i_lpm->lpm.tbl8[i].depth <= depth)
				i_lpm->lpm.tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = i_lpm->lpm.tbl8[tbl8_group_start].valid_group,
			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (i_lpm->lpm.tbl8[i].depth <= depth)
				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
		}
	}
	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check(i_lpm->lpm.tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from being hoisted.
		 */
		i_lpm->lpm.tbl24[tbl24_index].valid = 0;
		__atomic_thread_fence(__ATOMIC_RELEASE);
		status = tbl8_free(i_lpm, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = i_lpm->lpm.tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = i_lpm->lpm.tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from being hoisted.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_RELEASE);
		status = tbl8_free(i_lpm, tbl8_group_start);
	}
#undef group_idx
	return status;
}
/*
 * Deletes a rule.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	struct __rte_lpm *i_lpm;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;

	/*
	 * Check input arguments. Note: IP is an unsigned 32-bit integer, so
	 * any value is acceptable and it need not be range-checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(i_lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete(i_lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If there is no such
	 * rule, find_previous_rule returns -1 and the table entries
	 * associated with this rule are invalidated.
	 */
	sub_rule_index = find_previous_rule(i_lpm, ip, depth, &sub_rule_depth);
	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(i_lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(i_lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	struct __rte_lpm *i_lpm;

	i_lpm = container_of(lpm, struct __rte_lpm, lpm);

	/* Zero rule information. */
	memset(i_lpm->rule_info, 0, sizeof(i_lpm->rule_info));

	/* Zero tbl24. */
	memset(i_lpm->lpm.tbl24, 0, sizeof(i_lpm->lpm.tbl24));

	/* Zero tbl8. */
	memset(i_lpm->lpm.tbl8, 0, sizeof(i_lpm->lpm.tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * i_lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(i_lpm->rules_tbl, 0, sizeof(i_lpm->rules_tbl[0]) * i_lpm->max_rules);
}