/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_tailq.h>

#include "rte_lpm6.h"
#define RTE_LPM6_TBL24_NUM_ENTRIES        (1 << 24)
#define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES         256
#define RTE_LPM6_TBL8_MAX_NUM_GROUPS      (1 << 21)

#define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000
#define RTE_LPM6_LOOKUP_SUCCESS          0x20000000
#define RTE_LPM6_TBL8_BITMASK            0x001FFFFF

#define ADD_FIRST_BYTE                            3
#define LOOKUP_FIRST_BYTE                         4
#define BYTE_SIZE                                 8
#define BYTES2_SIZE                              16
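/*
 * Note on the stepping constants above: the first add/lookup step resolves
 * the top three address bytes (24 bits) through tbl24; every following step
 * resolves one more byte through a tbl8. ADD_FIRST_BYTE is the number of
 * bytes consumed by the first add step, LOOKUP_FIRST_BYTE is the 1-based
 * byte where lookup_step() continues after indexing tbl24, and BYTES2_SIZE
 * (16) is the bit shift of ip[0] inside the 24-bit tbl24 index:
 *
 *	tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];
 */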
#define RULE_HASH_TABLE_EXTRA_SPACE              64
#define TBL24_IND                        UINT32_MAX

#define lpm6_tbl8_gindex next_hop
/** Flags for setting an entry as valid/invalid. */
enum valid_flag {
	INVALID = 0,
	VALID
};

TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm6_tailq = {
	.name = "RTE_LPM6",
};
EAL_REGISTER_TAILQ(rte_lpm6_tailq)
/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
struct rte_lpm6_tbl_entry {
	uint32_t next_hop    :21; /**< Next hop / next table to be checked. */
	uint32_t depth       :8;  /**< Rule depth. */

	/* Flags. */
	uint32_t valid       :1;  /**< Validation flag. */
	uint32_t valid_group :1;  /**< Group validation flag. */
	uint32_t ext_entry   :1;  /**< External entry. */
};
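/*
 * Illustrative sketch (not in the upstream file): how the bit-fields above
 * line up with the RTE_LPM6_* masks when the entry is read as one 32-bit
 * word, assuming the LSB-first bit-field layout the lookup code also
 * relies on.
 */
static __rte_unused void
example_tbl_entry_encoding(void)
{
	struct rte_lpm6_tbl_entry e = {
		.lpm6_tbl8_gindex = 5,	/* alias for the next_hop bits */
		.valid = 1,
		.valid_group = 1,
		.ext_entry = 1,
	};
	uint32_t word;

	memcpy(&word, &e, sizeof(word));
	/* valid (bit 29) + ext_entry (bit 31) == the "valid external" mask */
	assert((word & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM6_VALID_EXT_ENTRY_BITMASK);
	/* the low 21 bits recover the tbl8 group index */
	assert((word & RTE_LPM6_TBL8_BITMASK) == 5);
}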
/** Rules tbl entry structure. */
struct rte_lpm6_rule {
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
	uint8_t depth; /**< Rule depth. */
};
/** Rules tbl entry key. */
struct rte_lpm6_rule_key {
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
	uint8_t depth; /**< Rule depth. */
};
/** Header of tbl8 */
struct rte_lpm_tbl8_hdr {
	uint32_t owner_tbl_ind; /**< owner table: TBL24_IND if owner is tbl24,
				 * otherwise index of tbl8
				 */
	uint32_t owner_entry_ind; /**< index of the owner table entry where
				   * pointer to the tbl8 is stored
				   */
	uint32_t ref_cnt; /**< table reference counter */
};
/** LPM6 structure. */
struct rte_lpm6 {
	/* LPM metadata. */
	char name[RTE_LPM6_NAMESIZE];	/**< Name of the lpm. */
	uint32_t max_rules;		/**< Max number of rules. */
	uint32_t used_rules;		/**< Used rules so far. */
	uint32_t number_tbl8s;		/**< Number of tbl8s to allocate. */

	/* LPM Tables. */
	struct rte_hash *rules_tbl;	/**< LPM rules. */
	struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */

	uint32_t *tbl8_pool;		/**< pool of indexes of free tbl8s */
	uint32_t tbl8_pool_pos;		/**< current position in the tbl8 pool */

	struct rte_lpm_tbl8_hdr *tbl8_hdrs; /**< array of tbl8 headers */

	struct rte_lpm6_tbl_entry tbl8[0]
			__rte_cache_aligned; /**< LPM tbl8 table. */
};
/*
 * Takes an array of uint8_t (IPv6 address) and masks it using the depth:
 * the first 'depth' bits are left untouched and the remaining bits are
 * set to 0.
 */
static inline void
ip6_mask_addr(uint8_t *ip, uint8_t depth)
{
	int16_t part_depth, mask;
	int i;

	part_depth = depth;

	for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
		if (part_depth < BYTE_SIZE && part_depth >= 0) {
			mask = (uint16_t)(~(UINT8_MAX >> part_depth));
			ip[i] = (uint8_t)(ip[i] & mask);
		} else if (part_depth < 0)
			ip[i] = 0;

		part_depth -= BYTE_SIZE;
	}
}
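/*
 * Worked example: for depth = 20, part_depth steps through 20, 12, 4, -4...
 * so bytes 0-1 are kept whole, byte 2 is ANDed with ~(0xFF >> 4) = 0xF0,
 * and bytes 3-15 are zeroed.
 */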
/* copy ipv6 address */
static inline void
ip6_copy_addr(uint8_t *dst, const uint8_t *src)
{
	rte_memcpy(dst, src, RTE_LPM6_IPV6_ADDR_SIZE);
}
/*
 * LPM6 rule hash function
 *
 * It's used as the hash function for the rte_hash
 * table that stores the lpm6 rules.
 */
static inline uint32_t
rule_hash(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	return rte_jhash(data, sizeof(struct rte_lpm6_rule_key), init_val);
}
/*
 * Init pool of free tbl8 indexes
 */
static void
tbl8_pool_init(struct rte_lpm6 *lpm)
{
	uint32_t i;

	/* put entire range of indexes to the tbl8 pool */
	for (i = 0; i < lpm->number_tbl8s; i++)
		lpm->tbl8_pool[i] = i;

	lpm->tbl8_pool_pos = 0;
}
/*
 * Get an index of a free tbl8 from the pool
 */
static inline uint32_t
tbl8_get(struct rte_lpm6 *lpm, uint32_t *tbl8_ind)
{
	if (lpm->tbl8_pool_pos == lpm->number_tbl8s)
		/* no more free tbl8 */
		return -ENOSPC;

	/* next index */
	*tbl8_ind = lpm->tbl8_pool[lpm->tbl8_pool_pos++];
	return 0;
}
/*
 * Put an index of a free tbl8 back to the pool
 */
static inline uint32_t
tbl8_put(struct rte_lpm6 *lpm, uint32_t tbl8_ind)
{
	if (lpm->tbl8_pool_pos == 0)
		/* pool is full */
		return -ENOSPC;

	lpm->tbl8_pool[--lpm->tbl8_pool_pos] = tbl8_ind;
	return 0;
}
/*
 * Returns number of tbl8s available in the pool
 */
static inline uint32_t
tbl8_available(struct rte_lpm6 *lpm)
{
	return lpm->number_tbl8s - lpm->tbl8_pool_pos;
}
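/*
 * Illustrative pool usage (sketch, not in the upstream file): indexes below
 * tbl8_pool_pos have been handed out, indexes at or above it are free.
 *
 *	uint32_t ind;
 *	if (tbl8_get(lpm, &ind) == 0) {
 *		struct rte_lpm6_tbl_entry *group =
 *			&lpm->tbl8[ind * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES];
 *		... use the 256-entry group ...
 *		tbl8_put(lpm, ind);	// return it once unreferenced
 *	}
 */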
/*
 * Init a rule key.
 * note that ip must be already masked
 */
static inline void
rule_key_init(struct rte_lpm6_rule_key *key, uint8_t *ip, uint8_t depth)
{
	ip6_copy_addr(key->ip, ip);
	key->depth = depth;
}
/*
 * Rebuild the entire LPM tree by reinserting all rules
 */
static void
rebuild_lpm(struct rte_lpm6 *lpm)
{
	uint64_t next_hop;
	struct rte_lpm6_rule_key *rule_key;
	uint32_t iter = 0;

	while (rte_hash_iterate(lpm->rules_tbl, (void *) &rule_key,
			(void **) &next_hop, &iter) >= 0)
		rte_lpm6_add(lpm, rule_key->ip, rule_key->depth,
				(uint32_t) next_hop);
}
/*
 * Allocates memory for LPM object
 */
struct rte_lpm6 *
rte_lpm6_create(const char *name, int socket_id,
		const struct rte_lpm6_config *config)
{
	char mem_name[RTE_LPM6_NAMESIZE];
	struct rte_lpm6 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint64_t mem_size;
	struct rte_lpm6_list *lpm_list;
	struct rte_hash *rules_tbl = NULL;
	uint32_t *tbl8_pool = NULL;
	struct rte_lpm_tbl8_hdr *tbl8_hdrs = NULL;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config == NULL) ||
			(config->max_rules == 0) ||
			config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* create rules hash table */
	snprintf(mem_name, sizeof(mem_name), "LRH_%s", name);
	struct rte_hash_parameters rule_hash_tbl_params = {
		.entries = config->max_rules * 1.2 +
			RULE_HASH_TABLE_EXTRA_SPACE,
		.key_len = sizeof(struct rte_lpm6_rule_key),
		.hash_func = rule_hash,
		.hash_func_init_val = 0,
		.name = mem_name,
		.reserved = 0,
		.socket_id = socket_id,
		.extra_flag = 0
	};

	rules_tbl = rte_hash_create(&rule_hash_tbl_params);
	if (rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)",
				rte_strerror(rte_errno), rte_errno);
		goto fail_wo_unlock;
	}

	/* allocate tbl8 indexes pool */
	tbl8_pool = rte_malloc(NULL,
			sizeof(uint32_t) * config->number_tbl8s,
			RTE_CACHE_LINE_SIZE);
	if (tbl8_pool == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)",
				rte_strerror(rte_errno), rte_errno);
		rte_errno = ENOMEM;
		goto fail_wo_unlock;
	}

	/* allocate tbl8 headers */
	tbl8_hdrs = rte_malloc(NULL,
			sizeof(struct rte_lpm_tbl8_hdr) * config->number_tbl8s,
			RTE_CACHE_LINE_SIZE);
	if (tbl8_hdrs == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)",
				rte_strerror(rte_errno), rte_errno);
		rte_errno = ENOMEM;
		goto fail_wo_unlock;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	rte_mcfg_tailq_write_lock();

	/* Guarantee there's no existing entry with the same name */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm6 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto fail;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, (size_t)mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));
	lpm->rules_tbl = rules_tbl;
	lpm->tbl8_pool = tbl8_pool;
	lpm->tbl8_hdrs = tbl8_hdrs;

	/* init the tbl8 pool */
	tbl8_pool_init(lpm);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);
	rte_mcfg_tailq_write_unlock();
	return lpm;

fail:
	rte_mcfg_tailq_write_unlock();

fail_wo_unlock:
	rte_free(tbl8_hdrs);
	rte_free(tbl8_pool);
	rte_hash_free(rules_tbl);

	return NULL;
}
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm6 *
rte_lpm6_find_existing(const char *name)
{
	struct rte_lpm6 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm6_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm6 *) te->data;
		if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm6_free(struct rte_lpm6 *lpm)
{
	struct rte_lpm6_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}

	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_free(lpm->tbl8_hdrs);
	rte_free(lpm->tbl8_pool);
	rte_hash_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}
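/*
 * Illustrative lifecycle sketch (not in the upstream file; values are
 * arbitrary): create a table, add a /48, look it up, free it.
 *
 *	struct rte_lpm6_config cfg = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 1 << 16,
 *		.flags = 0,
 *	};
 *	uint8_t pfx[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8, };
 *	uint32_t nh;
 *	struct rte_lpm6 *l = rte_lpm6_create("example", SOCKET_ID_ANY, &cfg);
 *
 *	if (l != NULL && rte_lpm6_add(l, pfx, 48, 7) >= 0 &&
 *			rte_lpm6_lookup(l, pfx, &nh) == 0)
 *		assert(nh == 7);
 *	rte_lpm6_free(l);
 */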
/* Find a rule */
static inline int
rule_find_with_key(struct rte_lpm6 *lpm,
		const struct rte_lpm6_rule_key *rule_key,
		uint32_t *next_hop)
{
	uint64_t hash_val;
	int ret;

	/* lookup for a rule */
	ret = rte_hash_lookup_data(lpm->rules_tbl, (const void *) rule_key,
		(void **) &hash_val);
	if (ret >= 0) {
		/* rule is found */
		*next_hop = (uint32_t) hash_val;
		return 1;
	}

	return 0;
}
/* Find a rule */
static int
rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t *next_hop)
{
	struct rte_lpm6_rule_key rule_key;

	/* init a rule key */
	rule_key_init(&rule_key, ip, depth);

	return rule_find_with_key(lpm, &rule_key, next_hop);
}
/*
 * Checks if a rule already exists in the rules table and updates
 * the next hop if so. Otherwise it adds a new rule if enough space
 * is available.
 *
 * Returns:
 *    0 - next hop of the existing rule is updated
 *    1 - new rule successfully added
 *   <0 - error
 */
static inline int
rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t next_hop)
{
	int ret, rule_exist;
	struct rte_lpm6_rule_key rule_key;
	uint32_t unused;

	/* init a rule key */
	rule_key_init(&rule_key, ip, depth);

	/* Scan through rule list to see if rule already exists. */
	rule_exist = rule_find_with_key(lpm, &rule_key, &unused);

	/*
	 * If rule does not exist check if there is space to add a new rule to
	 * this rule group. If there is no space return error.
	 */
	if (!rule_exist && lpm->used_rules == lpm->max_rules)
		return -ENOSPC;

	/* add the rule or update rules next hop */
	ret = rte_hash_add_key_data(lpm->rules_tbl, &rule_key,
		(void *)(uintptr_t) next_hop);
	if (ret < 0)
		return ret;

	/* Increment the used rules counter for this rule group. */
	if (!rule_exist) {
		lpm->used_rules++;
		return 1;
	}

	return 0;
}
/*
 * Function that expands a rule across the data structure when a less-generic
 * one has been added before. It ensures that every possible combination of bits
 * in the IP address returns a match.
 */
static void
expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t old_depth,
		uint8_t new_depth, uint32_t next_hop, uint8_t valid)
{
	uint32_t tbl8_group_end, tbl8_gindex_next, j;

	tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

	struct rte_lpm6_tbl_entry new_tbl8_entry = {
		.valid = valid,
		.valid_group = valid,
		.depth = new_depth,
		.next_hop = next_hop,
		.ext_entry = 0,
	};

	for (j = tbl8_gindex; j < tbl8_group_end; j++) {
		if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0
				&& lpm->tbl8[j].depth <= old_depth)) {

			lpm->tbl8[j] = new_tbl8_entry;

		} else if (lpm->tbl8[j].ext_entry == 1) {

			tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex
					* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			expand_rule(lpm, tbl8_gindex_next, old_depth, new_depth,
					next_hop, valid);
		}
	}
}
/*
 * Init a tbl8 header
 */
static inline void
init_tbl8_header(struct rte_lpm6 *lpm, uint32_t tbl_ind,
		uint32_t owner_tbl_ind, uint32_t owner_entry_ind)
{
	struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind];
	tbl_hdr->owner_tbl_ind = owner_tbl_ind;
	tbl_hdr->owner_entry_ind = owner_entry_ind;
	tbl_hdr->ref_cnt = 0;
}
/*
 * Calculate index to the table based on the number and position
 * of the bytes being inspected in this step.
 */
static uint32_t
get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
{
	uint32_t entry_ind, i;
	int8_t bitshift;

	entry_ind = 0;
	for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
		bitshift = (int8_t)((bytes - i)*BYTE_SIZE);

		if (bitshift < 0)
			bitshift = 0;
		entry_ind = entry_ind | ip[i-1] << bitshift;
	}

	return entry_ind;
}
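/*
 * Worked example: for the tbl24 step (first_byte = 1, bytes = 3) the loop
 * produces entry_ind = ip[0] << 16 | ip[1] << 8 | ip[2]; for the one-byte
 * tbl8 steps (bytes = 1, first_byte > 1) the shift clamps to 0 and
 * entry_ind is simply ip[first_byte - 1].
 */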
/*
 * Simulate adding a new route to the LPM, counting the number
 * of new tables that will be needed.
 *
 * It returns 0 on success, or 1 if
 * the process needs to be continued by calling the function again.
 */
static inline int
simulate_add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
		struct rte_lpm6_tbl_entry **next_tbl, const uint8_t *ip,
		uint8_t bytes, uint8_t first_byte, uint8_t depth,
		uint32_t *need_tbl_nb)
{
	uint32_t entry_ind;
	uint8_t bits_covered;
	uint32_t next_tbl_ind;

	/*
	 * Calculate index to the table based on the number and position
	 * of the bytes being inspected in this step.
	 */
	entry_ind = get_bitshift(ip, first_byte, bytes);

	/* Number of bits covered in this step */
	bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);

	if (depth <= bits_covered) {
		/* this is the last step, no more tables are needed */
		*need_tbl_nb = 0;
		return 0;
	}

	if (tbl[entry_ind].valid == 0 || tbl[entry_ind].ext_entry == 0) {
		/* from this point on a new table is needed on each level
		 * that is not covered yet
		 */
		depth -= bits_covered;
		uint32_t cnt = depth >> 3; /* depth / BYTE_SIZE */
		if (depth & 7) /* 0b00000111 */
			/* if depth % 8 > 0 then one more table is needed
			 * for those last bits
			 */
			cnt++;

		*need_tbl_nb = cnt;
		return 0;
	}

	next_tbl_ind = tbl[entry_ind].lpm6_tbl8_gindex;
	*next_tbl = &(lpm->tbl8[next_tbl_ind *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
	*need_tbl_nb = 0;
	return 1;
}
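/*
 * Worked example: when adding a /64 whose tbl24 entry is still invalid,
 * the first step covers 24 bits, so depth -= 24 leaves 40; cnt = 40 >> 3
 * = 5 new tbl8s are needed, and the simulation can stop there since every
 * level below an invalid entry must be a fresh table.
 */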
/*
 * Partially adds a new route to the data structure (tbl24+tbl8s).
 * It returns 0 on success, a negative number on failure, or 1 if
 * the process needs to be continued by calling the function again.
 */
static inline int
add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
		uint32_t tbl_ind, struct rte_lpm6_tbl_entry **next_tbl,
		uint32_t *next_tbl_ind, uint8_t *ip, uint8_t bytes,
		uint8_t first_byte, uint8_t depth, uint32_t next_hop,
		uint8_t is_new_rule)
{
	uint32_t entry_ind, tbl_range, tbl8_group_start, tbl8_group_end, i;
	uint32_t tbl8_gindex;
	uint8_t bits_covered;
	int ret;

	/*
	 * Calculate index to the table based on the number and position
	 * of the bytes being inspected in this step.
	 */
	entry_ind = get_bitshift(ip, first_byte, bytes);

	/* Number of bits covered in this step */
	bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);

	/*
	 * If depth is smaller than or equal to this number (ie this is
	 * the last step) expand the rule across the relevant positions
	 * in the table.
	 */
	if (depth <= bits_covered) {
		tbl_range = 1 << (bits_covered - depth);

		for (i = entry_ind; i < (entry_ind + tbl_range); i++) {
			if (!tbl[i].valid || (tbl[i].ext_entry == 0 &&
					tbl[i].depth <= depth)) {

				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = next_hop,
					.depth = depth,
					.valid = VALID,
					.valid_group = VALID,
					.ext_entry = 0,
				};

				tbl[i] = new_tbl_entry;

			} else if (tbl[i].ext_entry == 1) {

				/*
				 * If tbl entry is valid and extended calculate the index
				 * into next tbl8 and expand the rule across the data structure.
				 */
				tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
						RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
				expand_rule(lpm, tbl8_gindex, depth, depth,
						next_hop, VALID);
			}
		}

		/* update tbl8 rule reference counter */
		if (tbl_ind != TBL24_IND && is_new_rule)
			lpm->tbl8_hdrs[tbl_ind].ref_cnt++;

		return 0;
	}
	/*
	 * If this is not the last step just fill one position
	 * and calculate the index to the next table.
	 */
	else {
		/* If it's invalid a new tbl8 is needed */
		if (!tbl[entry_ind].valid) {
			/* get a new table */
			ret = tbl8_get(lpm, &tbl8_gindex);
			if (ret != 0)
				return -ENOSPC;

			/* invalidate all new tbl8 entries */
			tbl8_group_start = tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			memset(&lpm->tbl8[tbl8_group_start], 0,
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES *
					sizeof(struct rte_lpm6_tbl_entry));

			/* init the new table's header:
			 * save the reference to the owner table
			 */
			init_tbl8_header(lpm, tbl8_gindex, tbl_ind, entry_ind);

			/* reference to a new tbl8 */
			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[entry_ind] = new_tbl_entry;

			/* update the current table's reference counter */
			if (tbl_ind != TBL24_IND)
				lpm->tbl8_hdrs[tbl_ind].ref_cnt++;
		}
		/*
		 * If it's valid but not extended the rule that was stored
		 * here needs to be moved to the next table.
		 */
		else if (tbl[entry_ind].ext_entry == 0) {
			/* get a new tbl8 index */
			ret = tbl8_get(lpm, &tbl8_gindex);
			if (ret != 0)
				return -ENOSPC;

			tbl8_group_start = tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_group_start +
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

			struct rte_lpm6_tbl_entry tbl_entry = {
				.next_hop = tbl[entry_ind].next_hop,
				.depth = tbl[entry_ind].depth,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 0,
			};

			/* Populate new tbl8 with tbl value. */
			for (i = tbl8_group_start; i < tbl8_group_end; i++)
				lpm->tbl8[i] = tbl_entry;

			/* init the new table's header:
			 * save the reference to the owner table
			 */
			init_tbl8_header(lpm, tbl8_gindex, tbl_ind, entry_ind);

			/*
			 * Update tbl entry to point to new tbl8 entry. Note: The
			 * ext_flag and tbl8_index need to be updated simultaneously,
			 * so assign whole structure in one go.
			 */
			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[entry_ind] = new_tbl_entry;

			/* update the current table's reference counter */
			if (tbl_ind != TBL24_IND)
				lpm->tbl8_hdrs[tbl_ind].ref_cnt++;
		}

		*next_tbl_ind = tbl[entry_ind].lpm6_tbl8_gindex;
		*next_tbl = &(lpm->tbl8[*next_tbl_ind *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);

		return 1;
	}
}
/*
 * Add a route
 */
int
rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint8_t next_hop)
{
	return rte_lpm6_add_v1705(lpm, ip, depth, next_hop);
}
VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0);
/*
 * Simulate adding a route to LPM
 *
 * Returns:
 *    0 on success
 *    -ENOSPC if not enough tbl8s are left
 */
static int
simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
{
	struct rte_lpm6_tbl_entry *tbl;
	struct rte_lpm6_tbl_entry *tbl_next = NULL;
	int ret, i;

	/* number of new tables needed for a step */
	uint32_t need_tbl_nb;
	/* total number of new tables needed */
	uint32_t total_need_tbl_nb;

	/* Inspect the first three bytes through tbl24 on the first step. */
	ret = simulate_add_step(lpm, lpm->tbl24, &tbl_next, masked_ip,
			ADD_FIRST_BYTE, 1, depth, &need_tbl_nb);
	total_need_tbl_nb = need_tbl_nb;
	/*
	 * Inspect one by one the rest of the bytes until
	 * the process is completed.
	 */
	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && ret == 1; i++) {
		tbl = tbl_next;
		ret = simulate_add_step(lpm, tbl, &tbl_next, masked_ip, 1,
				(uint8_t)(i+1), depth, &need_tbl_nb);
		total_need_tbl_nb += need_tbl_nb;
	}

	if (tbl8_available(lpm) < total_need_tbl_nb)
		/* not enough tbl8s to add the rule */
		return -ENOSPC;

	return 0;
}
int
rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t next_hop)
{
	struct rte_lpm6_tbl_entry *tbl;
	struct rte_lpm6_tbl_entry *tbl_next = NULL;
	/* init to avoid compiler warning */
	uint32_t tbl_next_num = 123456;
	int status;
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	int i;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	ip6_copy_addr(masked_ip, ip);
	ip6_mask_addr(masked_ip, depth);

	/* Simulate adding a new route */
	int ret = simulate_add(lpm, masked_ip, depth);
	if (ret != 0)
		return ret;

	/* Add the rule to the rule table. */
	int is_new_rule = rule_add(lpm, masked_ip, depth, next_hop);
	/* If there is no space available for new rule return error. */
	if (is_new_rule < 0)
		return is_new_rule;

	/* Inspect the first three bytes through tbl24 on the first step. */
	tbl = lpm->tbl24;
	status = add_step(lpm, tbl, TBL24_IND, &tbl_next, &tbl_next_num,
			masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,
			is_new_rule);
	assert(status >= 0);

	/*
	 * Inspect one by one the rest of the bytes until
	 * the process is completed.
	 */
	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
		tbl = tbl_next;
		status = add_step(lpm, tbl, tbl_next_num, &tbl_next,
				&tbl_next_num, masked_ip, 1, (uint8_t)(i+1),
				depth, next_hop, is_new_rule);
		assert(status >= 0);
	}

	return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip,
				uint8_t depth, uint32_t next_hop),
		rte_lpm6_add_v1705);
/*
 * Takes a pointer to a table entry and inspects one level.
 * The function returns 0 on lookup success, -ENOENT if no match was found,
 * or 1 if the process needs to be continued by calling the function again.
 */
static inline int
lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
		const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
		uint8_t first_byte, uint32_t *next_hop)
{
	uint32_t tbl8_index, tbl_entry;

	/* Take the integer value from the pointer. */
	tbl_entry = *(const uint32_t *)tbl;

	/* If it is valid and extended we calculate the new pointer to return. */
	if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {

		tbl8_index = ip[first_byte-1] +
				((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);

		*tbl_next = &lpm->tbl8[tbl8_index];

		return 1;
	} else {
		/* If not extended then we can have a match. */
		*next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK);
		return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
	}
}
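/*
 * Worked decode example: an entry word 0xA0000005 is valid + extended, so
 * the next step continues at tbl8 group 5, i.e. at
 * lpm->tbl8[5 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES + ip[first_byte - 1]];
 * a word 0x20A00007 (valid, not extended, depth 5) yields next_hop 7
 * and returns 0.
 */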
/*
 * Looks up an IP
 */
int
rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
{
	uint32_t next_hop32 = 0;
	int32_t status;

	/* DEBUG: Check user input arguments. */
	if (next_hop == NULL)
		return -EINVAL;

	status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32);
	if (status == 0)
		*next_hop = (uint8_t)next_hop32;

	return status;
}
VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0);
int
rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,
		uint32_t *next_hop)
{
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	int status;
	uint8_t first_byte;
	uint32_t tbl24_index;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL))
		return -EINVAL;

	first_byte = LOOKUP_FIRST_BYTE;
	tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];

	/* Calculate pointer to the first entry to be inspected */
	tbl = &lpm->tbl24[tbl24_index];

	do {
		/* Continue inspecting following levels until success or failure */
		status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++,
				next_hop);
		tbl = tbl_next;
	} while (status == 1);

	return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,
				uint32_t *next_hop), rte_lpm6_lookup_v1705);
/*
 * Looks up a group of IP addresses
 */
int
rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int16_t *next_hops, unsigned int n)
{
	unsigned int i;
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	uint32_t tbl24_index, next_hop;
	uint8_t first_byte;
	int status;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		first_byte = LOOKUP_FIRST_BYTE;
		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
				(ips[i][1] << BYTE_SIZE) | ips[i][2];

		/* Calculate pointer to the first entry to be inspected */
		tbl = &lpm->tbl24[tbl24_index];

		do {
			/* Continue inspecting following levels until success or failure */
			status = lookup_step(lpm, tbl, &tbl_next, ips[i],
					first_byte++, &next_hop);
			tbl = tbl_next;
		} while (status == 1);

		if (status < 0)
			next_hops[i] = -1;
		else
			next_hops[i] = (int16_t)next_hop;
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0);
int
rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int32_t *next_hops, unsigned int n)
{
	unsigned int i;
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	uint32_t tbl24_index, next_hop;
	uint8_t first_byte;
	int status;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		first_byte = LOOKUP_FIRST_BYTE;
		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
				(ips[i][1] << BYTE_SIZE) | ips[i][2];

		/* Calculate pointer to the first entry to be inspected */
		tbl = &lpm->tbl24[tbl24_index];

		do {
			/* Continue inspecting following levels
			 * until success or failure
			 */
			status = lookup_step(lpm, tbl, &tbl_next, ips[i],
					first_byte++, &next_hop);
			tbl = tbl_next;
		} while (status == 1);

		if (status < 0)
			next_hops[i] = -1;
		else
			next_hops[i] = (int32_t)next_hop;
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
				uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
				int32_t *next_hops, unsigned int n),
		rte_lpm6_lookup_bulk_func_v1705);
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t next_hop32 = 0;
	int32_t status;

	/* DEBUG: Check user input arguments. */
	if (next_hop == NULL)
		return -EINVAL;

	status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32);
	if (status > 0)
		*next_hop = (uint8_t)next_hop32;

	return status;
}
VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0);
int
rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];

	/* Check user arguments. */
	if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
			(depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	ip6_copy_addr(masked_ip, ip);
	ip6_mask_addr(masked_ip, depth);

	return rule_find(lpm, masked_ip, depth, next_hop);
}
BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm,
				uint8_t *ip, uint8_t depth, uint32_t *next_hop),
		rte_lpm6_is_rule_present_v1705);
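/*
 * Illustrative use (sketch, not in the upstream file): check for an exact
 * rule before deleting it; rule_find() returns 1 when the rule exists.
 *
 *	uint32_t nh;
 *	if (rte_lpm6_is_rule_present(lpm, pfx, 48, &nh) == 1)
 *		rte_lpm6_delete(lpm, pfx, 48);
 */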
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 * return
 *    0 on success
 *   <0 on failure
 */
static inline int
rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
	int ret;
	struct rte_lpm6_rule_key rule_key;

	/* init rule key */
	rule_key_init(&rule_key, ip, depth);

	/* delete the rule */
	ret = rte_hash_del_key(lpm->rules_tbl, (void *) &rule_key);
	if (ret >= 0)
		lpm->used_rules--;

	return ret;
}
/*
 * Deletes a group of rules
 *
 * Note that the function rebuilds the lpm table,
 * rather than doing incremental updates like
 * the regular delete function
 */
int
rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths,
		unsigned int n)
{
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	unsigned int i;

	/* Check input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (depths == NULL))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		ip6_copy_addr(masked_ip, ips[i]);
		ip6_mask_addr(masked_ip, depths[i]);
		rule_delete(lpm, masked_ip, depths[i]);
	}

	/*
	 * Set all the table entries to 0 (ie delete every rule
	 * from the data structure).
	 */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
	tbl8_pool_init(lpm);

	/*
	 * Add every rule again (except for the ones that were removed from
	 * the rules table).
	 */
	rebuild_lpm(lpm);

	return 0;
}
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm6_delete_all(struct rte_lpm6 *lpm)
{
	/* Zero used rules counter. */
	lpm->used_rules = 0;

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* init pool of free tbl8 indexes */
	tbl8_pool_init(lpm);

	/* Delete all rules from the rules table. */
	rte_hash_reset(lpm->rules_tbl);
}
/*
 * Convert a depth to a one byte long mask
 * Example: 4 will be converted to 0xF0
 */
static uint8_t __attribute__((pure))
depth_to_mask_1b(uint8_t depth)
{
	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (signed char)0x80 >> (depth - 1);
}
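/*
 * Worked example: depth = 4 gives (signed char)0x80 >> 3. The arithmetic
 * shift replicates the sign bit, producing 0xF0, i.e. a mask keeping the
 * four most significant bits of the byte.
 */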
/*
 * Find a less specific rule
 */
static int
rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		struct rte_lpm6_rule *rule)
{
	int ret;
	uint32_t next_hop;
	uint8_t mask;
	struct rte_lpm6_rule_key rule_key;

	if (depth == 1)
		return 0;

	rule_key_init(&rule_key, ip, depth);

	while (depth > 1) {
		depth--;

		/* each iteration zero one more bit of the key */
		mask = depth & 7; /* depth % BYTE_SIZE */
		if (mask > 0)
			mask = depth_to_mask_1b(mask);

		rule_key.depth = depth;
		rule_key.ip[depth >> 3] &= mask;

		ret = rule_find_with_key(lpm, &rule_key, &next_hop);
		if (ret) {
			rule->depth = depth;
			ip6_copy_addr(rule->ip, rule_key.ip);
			rule->next_hop = next_hop;
			return 1;
		}
	}

	return 0;
}
/*
 * Find range of tbl8 cells occupied by a rule
 */
static void
rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
		struct rte_lpm6_tbl_entry **from,
		struct rte_lpm6_tbl_entry **to,
		uint32_t *out_tbl_ind)
{
	uint32_t ind;
	uint32_t first_3bytes = (uint32_t)ip[0] << 16 | ip[1] << 8 | ip[2];

	if (depth <= 24) {
		/* rule is within the top level */
		ind = first_3bytes;
		*from = &lpm->tbl24[ind];
		ind += (1 << (24 - depth)) - 1;
		*to = &lpm->tbl24[ind];
		*out_tbl_ind = TBL24_IND;
	} else {
		/* top level entry */
		struct rte_lpm6_tbl_entry *tbl = &lpm->tbl24[first_3bytes];
		assert(tbl->ext_entry == 1);
		/* first tbl8 */
		uint32_t tbl_ind = tbl->lpm6_tbl8_gindex;
		tbl = &lpm->tbl8[tbl_ind *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES];
		/* current ip byte, the top level is already behind */
		uint8_t byte = 3;
		/* minus top level */
		depth -= 24;

		/* iterate through levels (tbl8s)
		 * until we reach the last one
		 */
		while (depth > 8) {
			tbl += ip[byte];
			assert(tbl->ext_entry == 1);
			/* go to the next level/tbl8 */
			tbl_ind = tbl->lpm6_tbl8_gindex;
			tbl = &lpm->tbl8[tbl_ind *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES];
			byte += 1;
			depth -= 8;
		}

		/* last level/tbl8 */
		ind = ip[byte] & depth_to_mask_1b(depth);
		*from = &tbl[ind];
		ind += (1 << (8 - depth)) - 1;
		*to = &tbl[ind];
		*out_tbl_ind = tbl_ind;
	}
}
/*
 * Remove a table from the LPM tree
 */
static void
remove_tbl(struct rte_lpm6 *lpm, struct rte_lpm_tbl8_hdr *tbl_hdr,
		uint32_t tbl_ind, struct rte_lpm6_rule *lsp_rule)
{
	struct rte_lpm6_tbl_entry *owner_entry;

	if (tbl_hdr->owner_tbl_ind == TBL24_IND)
		owner_entry = &lpm->tbl24[tbl_hdr->owner_entry_ind];
	else {
		uint32_t owner_tbl_ind = tbl_hdr->owner_tbl_ind;
		owner_entry = &lpm->tbl8[
			owner_tbl_ind * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES +
			tbl_hdr->owner_entry_ind];

		struct rte_lpm_tbl8_hdr *owner_tbl_hdr =
			&lpm->tbl8_hdrs[owner_tbl_ind];
		if (--owner_tbl_hdr->ref_cnt == 0)
			remove_tbl(lpm, owner_tbl_hdr, owner_tbl_ind,
					lsp_rule);
	}

	assert(owner_entry->ext_entry == 1);

	/* unlink the table */
	if (lsp_rule != NULL) {
		struct rte_lpm6_tbl_entry new_tbl_entry = {
			.next_hop = lsp_rule->next_hop,
			.depth = lsp_rule->depth,
			.valid = VALID,
			.valid_group = VALID,
			.ext_entry = 0
		};

		*owner_entry = new_tbl_entry;
	} else {
		struct rte_lpm6_tbl_entry new_tbl_entry = {
			.next_hop = 0,
			.depth = 0,
			.valid = INVALID,
			.valid_group = INVALID,
			.ext_entry = 0
		};

		*owner_entry = new_tbl_entry;
	}

	/* return the table to the pool */
	tbl8_put(lpm, tbl_ind);
}
/*
 * Deletes a rule
 */
int
rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	struct rte_lpm6_rule lsp_rule_obj;
	struct rte_lpm6_rule *lsp_rule;
	int ret;
	uint32_t tbl_ind;
	struct rte_lpm6_tbl_entry *from, *to;

	/* Check input arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	ip6_copy_addr(masked_ip, ip);
	ip6_mask_addr(masked_ip, depth);

	/* Delete the rule from the rule table. */
	ret = rule_delete(lpm, masked_ip, depth);
	if (ret < 0)
		return -ENOENT;

	/* find rule cells */
	rule_find_range(lpm, masked_ip, depth, &from, &to, &tbl_ind);

	/* find a less specific rule (a rule with smaller depth)
	 * note: masked_ip will be modified, don't use it anymore
	 */
	ret = rule_find_less_specific(lpm, masked_ip, depth,
			&lsp_rule_obj);
	lsp_rule = ret ? &lsp_rule_obj : NULL;

	/* decrement the table rule counter,
	 * note that tbl24 doesn't have a header
	 */
	if (tbl_ind != TBL24_IND) {
		struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind];
		if (--tbl_hdr->ref_cnt == 0) {
			/* remove the table */
			remove_tbl(lpm, tbl_hdr, tbl_ind, lsp_rule);
			return 0;
		}
	}

	/* iterate rule cells */
	for (; from <= to; from++)
		if (from->ext_entry == 1) {
			/* reference to a more specific space
			 * of the prefix/rule. Entries in a more
			 * specific space that are not used by
			 * a more specific prefix must be occupied
			 * by the prefix
			 */
			if (lsp_rule != NULL)
				expand_rule(lpm,
					from->lpm6_tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES,
					depth, lsp_rule->depth,
					lsp_rule->next_hop, VALID);
			else
				/* since the prefix has no less specific prefix,
				 * its more specific space must be invalidated
				 */
				expand_rule(lpm,
					from->lpm6_tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES,
					depth, 0, 0, INVALID);
		} else if (from->depth == depth) {
			/* entry is not a reference and belongs to the prefix */
			if (lsp_rule != NULL) {
				/* replace the entry with the less specific rule */
				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = lsp_rule->next_hop,
					.depth = lsp_rule->depth,
					.valid = VALID,
					.valid_group = VALID,
					.ext_entry = 0
				};

				*from = new_tbl_entry;
			} else {
				/* remove the entry */
				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = 0,
					.depth = 0,
					.valid = INVALID,
					.valid_group = INVALID,
					.ext_entry = 0
				};

				*from = new_tbl_entry;
			}
		}

	return 0;
}