/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#define RTE_LPM6_TBL24_NUM_ENTRIES        (1 << 24)
#define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES         256
#define RTE_LPM6_TBL8_MAX_NUM_GROUPS      (1 << 21)

#define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000
#define RTE_LPM6_LOOKUP_SUCCESS          0x20000000
#define RTE_LPM6_TBL8_BITMASK            0x001FFFFF

#define ADD_FIRST_BYTE                            3
#define LOOKUP_FIRST_BYTE                         4
#define BYTE_SIZE                                 8
#define BYTES2_SIZE                              16

#define lpm6_tbl8_gindex next_hop
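/*
 * An entry's 21-bit next_hop field is reused to hold the index of the tbl8
 * group it points to whenever ext_entry is set; the alias above makes that
 * reuse explicit at the points where the field is read as a group index.
 */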
/** Flags for setting an entry as valid/invalid. */
enum valid_flag {
        INVALID = 0,
        VALID
};

TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm6_tailq = {
        .name = "RTE_LPM6",
};
EAL_REGISTER_TAILQ(rte_lpm6_tailq)
/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
struct rte_lpm6_tbl_entry {
        uint32_t next_hop    :21; /**< Next hop / next table to be checked. */
        uint32_t depth       :8;  /**< Rule depth. */

        uint32_t valid       :1;  /**< Validation flag. */
        uint32_t valid_group :1;  /**< Group validation flag. */
        uint32_t ext_entry   :1;  /**< External entry. */
};
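/*
 * With the LSB-first bit-field allocation assumed throughout this file, the
 * 32-bit entry reads as:
 *   bits  0-20: next_hop  (hence RTE_LPM6_TBL8_BITMASK   = 0x001FFFFF)
 *   bits 21-28: depth
 *   bit     29: valid     (hence RTE_LPM6_LOOKUP_SUCCESS = 0x20000000)
 *   bit     30: valid_group
 *   bit     31: ext_entry
 * so valid and ext_entry together yield
 * RTE_LPM6_VALID_EXT_ENTRY_BITMASK = 0xA0000000.
 */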
/** Rules tbl entry structure. */
struct rte_lpm6_rule {
        uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
        uint32_t next_hop; /**< Rule next hop. */
        uint8_t depth; /**< Rule depth. */
};
/** LPM6 structure. */
struct rte_lpm6 {
        char name[RTE_LPM6_NAMESIZE];    /**< Name of the lpm. */
        uint32_t max_rules;              /**< Max number of rules. */
        uint32_t used_rules;             /**< Used rules so far. */
        uint32_t number_tbl8s;           /**< Number of tbl8s to allocate. */
        uint32_t next_tbl8;              /**< Next tbl8 to be used. */

        struct rte_lpm6_rule *rules_tbl; /**< LPM rules. */
        struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES]
                        __rte_cache_aligned; /**< LPM tbl24 table. */
        struct rte_lpm6_tbl_entry tbl8[0]
                        __rte_cache_aligned; /**< LPM tbl8 table. */
};
/*
 * Takes an array of uint8_t (IPv6 address) and masks it using the depth.
 * It leaves the first "depth" bits untouched and sets the rest to 0.
 */
static void
mask_ip(uint8_t *ip, uint8_t depth)
{
        int16_t part_depth, mask;
        int i;

        part_depth = depth;

        for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
                if (part_depth < BYTE_SIZE && part_depth >= 0) {
                        mask = (uint16_t)(~(UINT8_MAX >> part_depth));
                        ip[i] = (uint8_t)(ip[i] & mask);
                } else if (part_depth < 0) {
                        ip[i] = 0;
                }
                part_depth -= BYTE_SIZE;
        }
}
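/*
 * Worked example (illustrative): for depth = 20, part_depth takes the
 * values 20, 12, 4, -4, ... as the loop advances, so bytes 0 and 1 are
 * kept whole, byte 2 is ANDed with 0xF0 and bytes 3..15 are zeroed.
 */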
/*
 * Allocates memory for LPM object
 */
struct rte_lpm6 *
rte_lpm6_create(const char *name, int socket_id,
                const struct rte_lpm6_config *config)
{
        char mem_name[RTE_LPM6_NAMESIZE];
        struct rte_lpm6 *lpm = NULL;
        struct rte_tailq_entry *te;
        uint64_t mem_size, rules_size;
        struct rte_lpm6_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));
        /* Check user arguments. */
        if ((name == NULL) || (socket_id < -1) || (config == NULL) ||
                        (config->max_rules == 0) ||
                        config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
        /* Determine the amount of memory to allocate. */
        mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
                        RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
        rules_size = sizeof(struct rte_lpm6_rule) * config->max_rules;
        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* Guarantee there's no existing entry with the same name. */
        TAILQ_FOREACH(te, lpm_list, next) {
                lpm = (struct rte_lpm6 *) te->data;
                if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
                        break;
        }
        lpm = NULL;
        if (te != NULL) {
                rte_errno = EEXIST;
                goto exit;
        }
        /* Allocate tailq entry. */
        te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
                rte_errno = ENOMEM;
                goto exit;
        }
        /* Allocate memory to store the LPM data structures. */
        lpm = rte_zmalloc_socket(mem_name, (size_t)mem_size,
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (lpm == NULL) {
                RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
                rte_free(te);
                rte_errno = ENOMEM;
                goto exit;
        }

        lpm->rules_tbl = rte_zmalloc_socket(NULL,
                        (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

        if (lpm->rules_tbl == NULL) {
                RTE_LOG(ERR, LPM, "LPM rules_tbl allocation failed\n");
                rte_free(lpm);
                lpm = NULL;
                rte_free(te);
                rte_errno = ENOMEM;
                goto exit;
        }
        /* Save user arguments. */
        lpm->max_rules = config->max_rules;
        lpm->number_tbl8s = config->number_tbl8s;
        snprintf(lpm->name, sizeof(lpm->name), "%s", name);

        te->data = (void *) lpm;

        TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return lpm;
}
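/*
 * Illustrative usage sketch (not part of the library; the table name and
 * sizing values below are arbitrary examples):
 *
 *      struct rte_lpm6_config cfg = {
 *              .max_rules = 1024,
 *              .number_tbl8s = 1 << 16,
 *              .flags = 0,
 *      };
 *      struct rte_lpm6 *lpm6 = rte_lpm6_create("example_lpm6",
 *                      SOCKET_ID_ANY, &cfg);
 *      if (lpm6 == NULL)
 *              ... handle rte_errno ...
 */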
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm6 *
rte_lpm6_find_existing(const char *name)
{
        struct rte_lpm6 *l = NULL;
        struct rte_tailq_entry *te;
        struct rte_lpm6_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, lpm_list, next) {
                l = (struct rte_lpm6 *) te->data;
                if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return l;
}
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm6_free(struct rte_lpm6 *lpm)
{
        struct rte_lpm6_list *lpm_list;
        struct rte_tailq_entry *te;

        /* Check user arguments. */
        if (lpm == NULL)
                return;

        lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* Find our tailq entry. */
        TAILQ_FOREACH(te, lpm_list, next) {
                if (te->data == (void *) lpm)
                        break;
        }
        if (te != NULL)
                TAILQ_REMOVE(lpm_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_free(lpm->rules_tbl);
        rte_free(lpm);
        rte_free(te);
}
/*
 * Checks if a rule already exists in the rules table and updates
 * the next hop if so. Otherwise it adds a new rule if enough space is
 * available.
 */
static inline int32_t
rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint32_t next_hop, uint8_t depth)
{
        int32_t rule_index;
        /* Scan through rule list to see if rule already exists. */
        for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {

                /* If rule already exists update its next_hop and return. */
                if ((memcmp(lpm->rules_tbl[rule_index].ip, ip,
                                RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
                                lpm->rules_tbl[rule_index].depth == depth) {
                        lpm->rules_tbl[rule_index].next_hop = next_hop;

                        return rule_index;
                }
        }
        /*
         * If rule does not exist check if there is space to add a new rule to
         * this rule group. If there is no space return an error.
         */
        if (lpm->used_rules == lpm->max_rules) {
                return -ENOSPC;
        }

        /* If there is space for the new rule add it. */
        rte_memcpy(lpm->rules_tbl[rule_index].ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
        lpm->rules_tbl[rule_index].next_hop = next_hop;
        lpm->rules_tbl[rule_index].depth = depth;

        /* Increment the used rules counter for this rule group. */
        lpm->used_rules++;

        return rule_index;
}
/*
 * Function that expands a rule across the data structure when a less-generic
 * one has been added before. It ensures that every possible combination of bits
 * in the IP address returns a match.
 */
static void
expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
                uint32_t next_hop)
{
        uint32_t tbl8_group_end, tbl8_gindex_next, j;

        tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

        struct rte_lpm6_tbl_entry new_tbl8_entry = {
                .valid = VALID,
                .valid_group = VALID,
                .depth = depth,
                .next_hop = next_hop,
                .ext_entry = 0,
        };

        for (j = tbl8_gindex; j < tbl8_group_end; j++) {
                if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0
                                && lpm->tbl8[j].depth <= depth)) {
                        lpm->tbl8[j] = new_tbl8_entry;
                } else if (lpm->tbl8[j].ext_entry == 1) {
                        tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex
                                        * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
                        expand_rule(lpm, tbl8_gindex_next, depth, next_hop);
                }
        }
}
/*
 * Partially adds a new route to the data structure (tbl24+tbl8s).
 * It returns 0 on success, a negative number on failure, or 1 if
 * the process needs to be continued by calling the function again.
 */
static inline int
add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
                struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes,
                uint8_t first_byte, uint8_t depth, uint32_t next_hop)
{
        uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i;
        uint32_t tbl8_gindex;
        int8_t bitshift;
        uint8_t bits_covered;

        /*
         * Calculate index to the table based on the number and position
         * of the bytes being inspected in this step.
         */
        tbl_index = 0;
        for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
                bitshift = (int8_t)((bytes - i)*BYTE_SIZE);
                if (bitshift < 0)
                        bitshift = 0;
                tbl_index = tbl_index | ip[i-1] << bitshift;
        }

        /* Number of bits covered in this step. */
        bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);
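        /*
         * Worked example (illustrative): on the first step bytes = 3 and
         * first_byte = 1, so tbl_index = ip[0] << 16 | ip[1] << 8 | ip[2]
         * and bits_covered = 24; each later step inspects a single byte
         * and covers 8 additional bits.
         */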
        /*
         * If depth is smaller than or equal to this number (i.e. this is the
         * last step), expand the rule across the relevant positions in the table.
         */
        if (depth <= bits_covered) {
                tbl_range = 1 << (bits_covered - depth);

                for (i = tbl_index; i < (tbl_index + tbl_range); i++) {
                        if (!tbl[i].valid || (tbl[i].ext_entry == 0 &&
                                        tbl[i].depth <= depth)) {

                                struct rte_lpm6_tbl_entry new_tbl_entry = {
                                        .next_hop = next_hop,
                                        .depth = depth,
                                        .valid = VALID,
                                        .valid_group = VALID,
                                        .ext_entry = 0,
                                };

                                tbl[i] = new_tbl_entry;
                        } else if (tbl[i].ext_entry == 1) {
                                /*
                                 * If tbl entry is valid and extended calculate
                                 * the index into the next tbl8 and expand the
                                 * rule across the data structure.
                                 */
                                tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
                                                RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
                                expand_rule(lpm, tbl8_gindex, depth, next_hop);
                        }
                }

                return 0;
        }
        /*
         * If this is not the last step just fill one position
         * and calculate the index to the next table.
         */
        else {
                /* If it's invalid a new tbl8 is needed. */
                if (!tbl[tbl_index].valid) {
                        if (lpm->next_tbl8 < lpm->number_tbl8s)
                                tbl8_gindex = (lpm->next_tbl8)++;
                        else
                                return -ENOSPC;

                        struct rte_lpm6_tbl_entry new_tbl_entry = {
                                .lpm6_tbl8_gindex = tbl8_gindex,
                                .depth = 0,
                                .valid = VALID,
                                .valid_group = VALID,
                                .ext_entry = 1,
                        };

                        tbl[tbl_index] = new_tbl_entry;
                }
                /*
                 * If it's valid but not extended the rule that was stored
                 * here needs to be moved to the next table.
                 */
                else if (tbl[tbl_index].ext_entry == 0) {
                        /* Search for free tbl8 group. */
                        if (lpm->next_tbl8 < lpm->number_tbl8s)
                                tbl8_gindex = (lpm->next_tbl8)++;
                        else
                                return -ENOSPC;

                        tbl8_group_start = tbl8_gindex *
                                        RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
                        tbl8_group_end = tbl8_group_start +
                                        RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

                        /* Populate new tbl8 with tbl value. */
                        for (i = tbl8_group_start; i < tbl8_group_end; i++) {
                                lpm->tbl8[i].valid = VALID;
                                lpm->tbl8[i].depth = tbl[tbl_index].depth;
                                lpm->tbl8[i].next_hop = tbl[tbl_index].next_hop;
                                lpm->tbl8[i].ext_entry = 0;
                        }

                        /*
                         * Update tbl entry to point to new tbl8 entry. Note: The
                         * ext_flag and tbl8_index need to be updated simultaneously,
                         * so assign whole structure in one go.
                         */
                        struct rte_lpm6_tbl_entry new_tbl_entry = {
                                .lpm6_tbl8_gindex = tbl8_gindex,
                                .depth = 0,
                                .valid = VALID,
                                .valid_group = VALID,
                                .ext_entry = 1,
                        };

                        tbl[tbl_index] = new_tbl_entry;
                }

                *tbl_next = &(lpm->tbl8[tbl[tbl_index].lpm6_tbl8_gindex *
                                RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);

                return 1;
        }
}
int
rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
                uint8_t next_hop)
{
        return rte_lpm6_add_v1705(lpm, ip, depth, next_hop);
}
VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0);
int
rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
                uint32_t next_hop)
{
        struct rte_lpm6_tbl_entry *tbl;
        struct rte_lpm6_tbl_entry *tbl_next = NULL;
        int32_t rule_index;
        int status, i;
        uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
        /* Check user arguments. */
        if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
                return -EINVAL;

        /* Copy the IP and mask it to avoid modifying user's input data. */
        memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
        mask_ip(masked_ip, depth);

        /* Add the rule to the rule table. */
        rule_index = rule_add(lpm, masked_ip, next_hop, depth);

        /* If there is no space available for the new rule return an error. */
        if (rule_index < 0) {
                return rule_index;
        }

        /* Inspect the first three bytes through tbl24 on the first step. */
        tbl = lpm->tbl24;
        status = add_step(lpm, tbl, &tbl_next, masked_ip, ADD_FIRST_BYTE, 1,
                        depth, next_hop);
        if (status < 0) {
                rte_lpm6_delete(lpm, masked_ip, depth);

                return status;
        }

        /*
         * Inspect one by one the rest of the bytes until
         * the process is completed.
         */
        for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
                tbl = tbl_next;
                status = add_step(lpm, tbl, &tbl_next, masked_ip, 1, (uint8_t)(i+1),
                                depth, next_hop);
                if (status < 0) {
                        rte_lpm6_delete(lpm, masked_ip, depth);

                        return status;
                }
        }

        return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip,
                uint8_t depth, uint32_t next_hop),
                rte_lpm6_add_v1705);
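/*
 * Illustrative usage sketch (the prefix and next hop ID below are arbitrary
 * examples): install 2001:db8::/48 with next hop ID 5.
 *
 *      uint8_t prefix[RTE_LPM6_IPV6_ADDR_SIZE] = {
 *              0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 *      };
 *      if (rte_lpm6_add(lpm6, prefix, 48, 5) < 0)
 *              ... no rule space or invalid arguments ...
 */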
/*
 * Takes a pointer to a table entry and inspects one level.
 * The function returns 0 on lookup success, -ENOENT if no match was found
 * or 1 if the process needs to be continued by calling the function again.
 */
static inline int
lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
                const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
                uint8_t first_byte, uint32_t *next_hop)
{
        uint32_t tbl8_index, tbl_entry;

        /* Take the integer value from the pointer. */
        tbl_entry = *(const uint32_t *)tbl;

        /* If it is valid and extended we calculate the new pointer to return. */
        if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {

                tbl8_index = ip[first_byte-1] +
                                ((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
                                RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);

                *tbl_next = &lpm->tbl8[tbl8_index];

                return 1;
        } else {
                /* If not extended then we can have a match. */
                *next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK);
                return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
        }
}
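/*
 * In other words: an entry with both valid and ext_entry set sends the walk
 * down one more level, an entry with only valid set ends the walk with a
 * match, and an invalid entry ends it with -ENOENT.
 */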
int
rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
{
        uint32_t next_hop32 = 0;
        int32_t status;

        /* DEBUG: Check user input arguments. */
        if (next_hop == NULL)
                return -EINVAL;

        status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32);
        if (status == 0)
                *next_hop = (uint8_t)next_hop32;

        return status;
}
VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0);
int
rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,
                uint32_t *next_hop)
{
        const struct rte_lpm6_tbl_entry *tbl;
        const struct rte_lpm6_tbl_entry *tbl_next = NULL;
        int status;
        uint8_t first_byte;
        uint32_t tbl24_index;

        /* DEBUG: Check user input arguments. */
        if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) {
                return -EINVAL;
        }

        first_byte = LOOKUP_FIRST_BYTE;
        tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];

        /* Calculate pointer to the first entry to be inspected. */
        tbl = &lpm->tbl24[tbl24_index];

        do {
                /* Continue inspecting following levels until success or failure. */
                status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop);
                tbl = tbl_next;
        } while (status == 1);

        return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,
                uint32_t *next_hop), rte_lpm6_lookup_v1705);
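/*
 * Illustrative usage sketch (variable names are arbitrary): look up a
 * destination address and fall back to a default route ID on a miss.
 *
 *      uint32_t next_hop_id;
 *      if (rte_lpm6_lookup(lpm6, dst_ip, &next_hop_id) != 0)
 *              next_hop_id = default_next_hop;   (hypothetical fallback)
 */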
/*
 * Looks up a group of IP addresses
 */
int
rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,
                uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
                int16_t *next_hops, unsigned n)
{
        unsigned i;
        const struct rte_lpm6_tbl_entry *tbl;
        const struct rte_lpm6_tbl_entry *tbl_next = NULL;
        uint32_t tbl24_index, next_hop;
        uint8_t first_byte;
        int status;

        /* DEBUG: Check user input arguments. */
        if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) {
                return -EINVAL;
        }

        for (i = 0; i < n; i++) {
                first_byte = LOOKUP_FIRST_BYTE;
                tbl24_index = (ips[i][0] << BYTES2_SIZE) |
                                (ips[i][1] << BYTE_SIZE) | ips[i][2];

                /* Calculate pointer to the first entry to be inspected. */
                tbl = &lpm->tbl24[tbl24_index];

                do {
                        /* Continue inspecting following levels until success or failure. */
                        status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++,
                                        &next_hop);
                        tbl = tbl_next;
                } while (status == 1);

                if (status < 0)
                        next_hops[i] = -1;
                else
                        next_hops[i] = (int16_t)next_hop;
        }

        return 0;
}
VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0);
int
rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,
                uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
                int32_t *next_hops, unsigned int n)
{
        unsigned int i;
        const struct rte_lpm6_tbl_entry *tbl;
        const struct rte_lpm6_tbl_entry *tbl_next = NULL;
        uint32_t tbl24_index, next_hop;
        uint8_t first_byte;
        int status;

        /* DEBUG: Check user input arguments. */
        if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
                return -EINVAL;

        for (i = 0; i < n; i++) {
                first_byte = LOOKUP_FIRST_BYTE;
                tbl24_index = (ips[i][0] << BYTES2_SIZE) |
                                (ips[i][1] << BYTE_SIZE) | ips[i][2];

                /* Calculate pointer to the first entry to be inspected. */
                tbl = &lpm->tbl24[tbl24_index];

                do {
                        /*
                         * Continue inspecting following levels
                         * until success or failure.
                         */
                        status = lookup_step(lpm, tbl, &tbl_next, ips[i],
                                        first_byte++, &next_hop);
                        tbl = tbl_next;
                } while (status == 1);

                if (status < 0)
                        next_hops[i] = -1;
                else
                        next_hops[i] = (int32_t)next_hop;
        }

        return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
                uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
                int32_t *next_hops, unsigned int n),
                rte_lpm6_lookup_bulk_func_v1705);
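/*
 * Illustrative usage sketch (the BURST size is an arbitrary example):
 * resolve a burst of destinations in one call; on a miss the corresponding
 * slot is set to -1 by the loop above.
 *
 *      uint8_t dst[BURST][RTE_LPM6_IPV6_ADDR_SIZE];
 *      int32_t hops[BURST];
 *      rte_lpm6_lookup_bulk_func(lpm6, dst, hops, BURST);
 */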
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 */
static inline int32_t
rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
        uint32_t rule_index;

        /* Scan used rules at given depth to find rule. */
        for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
                /* If rule is found return the rule index. */
                if ((memcmp(lpm->rules_tbl[rule_index].ip, ip,
                                RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
                                lpm->rules_tbl[rule_index].depth == depth) {
                        return rule_index;
                }
        }

        /* If rule is not found return -ENOENT. */
        return -ENOENT;
}
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
                uint8_t *next_hop)
{
        uint32_t next_hop32 = 0;
        int32_t status;

        /* DEBUG: Check user input arguments. */
        if (next_hop == NULL)
                return -EINVAL;

        status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32);
        if (status > 0)
                *next_hop = (uint8_t)next_hop32;

        return status;
}
VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0);

int
rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
                uint32_t *next_hop)
{
        uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
        int32_t rule_index;

        /* Check user arguments. */
        if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
                        (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
                return -EINVAL;

        /* Copy the IP and mask it to avoid modifying user's input data. */
        memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
        mask_ip(ip_masked, depth);

        /* Look for the rule using rule_find. */
        rule_index = rule_find(lpm, ip_masked, depth);

        if (rule_index >= 0) {
                *next_hop = lpm->rules_tbl[rule_index].next_hop;
                return 1;
        }

        /* If rule is not found return 0. */
        return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm,
                uint8_t *ip, uint8_t depth, uint32_t *next_hop),
                rte_lpm6_is_rule_present_v1705);
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 */
static inline void
rule_delete(struct rte_lpm6 *lpm, int32_t rule_index)
{
        /*
         * Overwrite redundant rule with last rule in group and decrement rule
         * counter.
         */
        lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->used_rules-1];
        lpm->used_rules--;
}
int
rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
        int32_t rule_to_delete_index;
        uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
        unsigned i;

        /*
         * Check input arguments.
         */
        if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) {
                return -EINVAL;
        }

        /* Copy the IP and mask it to avoid modifying user's input data. */
        memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
        mask_ip(ip_masked, depth);

        /*
         * Find the index of the input rule (the one to be deleted) in the
         * rule table.
         */
        rule_to_delete_index = rule_find(lpm, ip_masked, depth);

        /*
         * Check if rule_to_delete_index was found. If no rule was found the
         * function rule_find returns -ENOENT.
         */
        if (rule_to_delete_index < 0)
                return rule_to_delete_index;

        /* Delete the rule from the rule table. */
        rule_delete(lpm, rule_to_delete_index);

        /*
         * Set all the table entries to 0 (i.e. delete every rule
         * from the data structure).
         */
        lpm->next_tbl8 = 0;
        memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
        memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
                        * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

        /*
         * Add every rule again (except for the one that was removed from
         * the rule table).
         */
        for (i = 0; i < lpm->used_rules; i++) {
                rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
                                lpm->rules_tbl[i].next_hop);
        }

        return 0;
}
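/*
 * Note: as the code above shows, deleting a single rule wipes tbl24 and the
 * tbl8s and re-adds every remaining rule, so the cost of a delete grows with
 * the total number of rules in the table.
 */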
/*
 * Deletes a group of rules
 */
int
rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
                uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n)
{
        int32_t rule_to_delete_index;
        uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
        unsigned i;

        /*
         * Check input arguments.
         */
        if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) {
                return -EINVAL;
        }
        for (i = 0; i < n; i++) {
                /* Copy the IP and mask it to avoid modifying user's input data. */
                memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE);
                mask_ip(ip_masked, depths[i]);

                /*
                 * Find the index of the input rule (the one to be deleted) in
                 * the rule table.
                 */
                rule_to_delete_index = rule_find(lpm, ip_masked, depths[i]);

                /*
                 * Check if rule_to_delete_index was found. If no rule was found the
                 * function rule_find returns -ENOENT.
                 */
                if (rule_to_delete_index < 0)
                        continue;

                /* Delete the rule from the rule table. */
                rule_delete(lpm, rule_to_delete_index);
        }
        /*
         * Set all the table entries to 0 (i.e. delete every rule
         * from the data structure).
         */
        lpm->next_tbl8 = 0;
        memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
        memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
                        * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

        /*
         * Add every rule again (except for the ones that were removed from
         * the rule table).
         */
        for (i = 0; i < lpm->used_rules; i++) {
                rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
                                lpm->rules_tbl[i].next_hop);
        }

        return 0;
}
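/*
 * Illustrative usage sketch (values and names are arbitrary examples):
 * remove two routes in one call; the rebuild of tbl24/tbl8 then happens once
 * for the whole batch rather than once per deleted route.
 *
 *      uint8_t del_depths[2] = { 48, 64 };
 *      rte_lpm6_delete_bulk_func(lpm6, del_prefixes, del_depths, 2);
 */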
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm6_delete_all(struct rte_lpm6 *lpm)
{
        /* Zero used rules counter. */
        lpm->used_rules = 0;

        /* Zero next tbl8 index. */
        lpm->next_tbl8 = 0;

        /* Zero tbl24. */
        memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

        /* Zero tbl8. */
        memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
                        RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

        /* Delete all rules from the rules table. */
        memset(lpm->rules_tbl, 0, sizeof(struct rte_lpm6_rule) * lpm->max_rules);
}