/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm6.h"
#define RTE_LPM6_TBL24_NUM_ENTRIES        (1 << 24)
#define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES         256
#define RTE_LPM6_TBL8_MAX_NUM_GROUPS      (1 << 21)
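
/*
 * tbl24 is indexed by the first 24 bits of an address, hence its 1 << 24
 * entries, while each tbl8 group resolves one further byte with 256
 * entries. A lookup therefore consumes 24 bits in the first step and
 * 8 bits in every step after that.
 */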
#define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000
#define RTE_LPM6_LOOKUP_SUCCESS          0x20000000
#define RTE_LPM6_TBL8_BITMASK            0x001FFFFF
#define ADD_FIRST_BYTE                            3
#define LOOKUP_FIRST_BYTE                         4
#define BYTE_SIZE                                 8
#define BYTES2_SIZE                              16
#define lpm6_tbl8_gindex next_hop
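
/*
 * The alias above reuses the 21-bit next_hop field as the index of the
 * tbl8 group an entry points to whenever ext_entry is set, so the same
 * 32-bit word encodes both leaf entries and internal references.
 */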
/** Flags for setting an entry as valid/invalid. */
enum valid_flag {
	INVALID = 0,
	VALID
};

TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm6_tailq = {
	.name = "RTE_LPM6",
};
EAL_REGISTER_TAILQ(rte_lpm6_tailq)
/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
struct rte_lpm6_tbl_entry {
	uint32_t next_hop	:21;	/**< Next hop / next table to be checked. */
	uint32_t depth		:8;	/**< Rule depth. */

	/* Flags. */
	uint32_t valid		:1;	/**< Validation flag. */
	uint32_t valid_group	:1;	/**< Group validation flag. */
	uint32_t ext_entry	:1;	/**< External entry. */
};
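
/*
 * Entry bit layout, least to most significant: next_hop/tbl8 index in
 * bits 0-20 (RTE_LPM6_TBL8_BITMASK, 0x001FFFFF), depth in bits 21-28,
 * valid at bit 29 (RTE_LPM6_LOOKUP_SUCCESS, 0x20000000), valid_group at
 * bit 30 and ext_entry at bit 31. valid and ext_entry together form
 * RTE_LPM6_VALID_EXT_ENTRY_BITMASK (0xA0000000), which lookup_step()
 * tests with a single 32-bit comparison.
 */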
/** Rules tbl entry structure. */
struct rte_lpm6_rule {
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
	uint8_t depth; /**< Rule depth. */
};
/** LPM6 structure. */
struct rte_lpm6 {
	/* LPM metadata. */
	char name[RTE_LPM6_NAMESIZE];	/**< Name of the lpm. */
	uint32_t max_rules;		/**< Max number of rules. */
	uint32_t used_rules;		/**< Used rules so far. */
	uint32_t number_tbl8s;		/**< Number of tbl8s to allocate. */
	uint32_t next_tbl8;		/**< Next tbl8 to be used. */

	/* LPM Tables. */
	struct rte_lpm6_rule *rules_tbl; /**< LPM rules. */
	struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm6_tbl_entry tbl8[0]
			__rte_cache_aligned; /**< LPM tbl8 table. */
};
/*
 * Takes an array of uint8_t (IPv6 address) and masks it using the depth.
 * It leaves the first "depth" bits untouched and sets the rest to 0.
 */
static inline void
mask_ip(uint8_t *ip, uint8_t depth)
{
	int16_t part_depth, mask;
	int i;

	part_depth = depth;

	for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
		if (part_depth < BYTE_SIZE && part_depth >= 0) {
			mask = (uint16_t)(~(UINT8_MAX >> part_depth));
			ip[i] = (uint8_t)(ip[i] & mask);
		} else if (part_depth < 0) {
			ip[i] = 0;
		}
		part_depth -= BYTE_SIZE;
	}
}
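
/*
 * For example, mask_ip(ip, 20) leaves ip[0] and ip[1] untouched, ANDs
 * ip[2] with 0xF0 and zeroes ip[3] through ip[15], so 2001:db8::
 * masked to /20 becomes 2001::.
 */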
/*
 * Allocates memory for LPM object
 */
struct rte_lpm6 *
rte_lpm6_create(const char *name, int socket_id,
		const struct rte_lpm6_config *config)
{
	char mem_name[RTE_LPM6_NAMESIZE];
	struct rte_lpm6 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint64_t mem_size, rules_size;
	struct rte_lpm6_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config == NULL) ||
			(config->max_rules == 0) ||
			config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
	rules_size = sizeof(struct rte_lpm6_rule) * config->max_rules;

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm6 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;
	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm6 *
rte_lpm6_find_existing(const char *name)
{
	struct rte_lpm6 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm6_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm6 *) te->data;
		if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm6_free(struct rte_lpm6 *lpm)
{
	struct rte_lpm6_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}

	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}
/*
 * Checks if a rule already exists in the rules table and updates
 * the nexthop if so. Otherwise it adds a new rule if enough space is available.
 */
static inline int32_t
rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint32_t next_hop, uint8_t depth)
{
	uint32_t rule_index;

	/* Scan through rule list to see if rule already exists. */
	for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {

		/* If rule already exists update its next_hop and return. */
		if ((memcmp(lpm->rules_tbl[rule_index].ip, ip,
				RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
				lpm->rules_tbl[rule_index].depth == depth) {
			lpm->rules_tbl[rule_index].next_hop = next_hop;

			return rule_index;
		}
	}

	/*
	 * If rule does not exist check if there is space to add a new rule to
	 * this rule group. If there is no space return error.
	 */
	if (lpm->used_rules == lpm->max_rules) {
		return -ENOSPC;
	}

	/* If there is space for the new rule add it. */
	rte_memcpy(lpm->rules_tbl[rule_index].ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	lpm->rules_tbl[rule_index].next_hop = next_hop;
	lpm->rules_tbl[rule_index].depth = depth;

	/* Increment the used rules counter for this rule group. */
	lpm->used_rules++;

	return rule_index;
}
/*
 * Function that expands a rule across the data structure when a less-generic
 * one has been added before. It ensures that every possible combination of bits
 * in the IP address returns a match.
 */
static void
expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
		uint32_t next_hop)
{
	uint32_t tbl8_group_end, tbl8_gindex_next, j;

	tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

	struct rte_lpm6_tbl_entry new_tbl8_entry = {
		.valid = VALID,
		.valid_group = VALID,
		.depth = depth,
		.next_hop = next_hop,
		.ext_entry = 0,
	};

	for (j = tbl8_gindex; j < tbl8_group_end; j++) {
		if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0
				&& lpm->tbl8[j].depth <= depth)) {

			lpm->tbl8[j] = new_tbl8_entry;

		} else if (lpm->tbl8[j].ext_entry == 1) {

			tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex
					* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			expand_rule(lpm, tbl8_gindex_next, depth, next_hop);
		}
	}
}
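
/*
 * Example of the depth test above: if a new /20 rule covers a tbl24
 * entry that already points into a tbl8 group holding /32 entries,
 * expand_rule() overwrites only the invalid entries and those with
 * depth <= 20, leaving the more specific /32 entries intact.
 */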
/*
 * Partially adds a new route to the data structure (tbl24+tbl8s).
 * It returns 0 on success, a negative number on failure, or 1 if
 * the process needs to be continued by calling the function again.
 */
static inline int
add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
		struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes,
		uint8_t first_byte, uint8_t depth, uint32_t next_hop)
{
	uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i;
	int32_t tbl8_gindex;
	int8_t bitshift;
	uint8_t bits_covered;

	/*
	 * Calculate index to the table based on the number and position
	 * of the bytes being inspected in this step.
	 */
	tbl_index = 0;
	for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
		bitshift = (int8_t)((bytes - i)*BYTE_SIZE);

		if (bitshift < 0)
			bitshift = 0;
		tbl_index = tbl_index | ip[i-1] << bitshift;
	}

	/* Number of bits covered in this step */
	bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);

	/*
	 * If the depth is not greater than this number (i.e. this is the last
	 * step) expand the rule across the relevant positions in the table.
	 */
	if (depth <= bits_covered) {
		tbl_range = 1 << (bits_covered - depth);

		for (i = tbl_index; i < (tbl_index + tbl_range); i++) {
			if (!tbl[i].valid || (tbl[i].ext_entry == 0 &&
					tbl[i].depth <= depth)) {

				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = next_hop,
					.depth = depth,
					.valid = VALID,
					.valid_group = VALID,
					.ext_entry = 0,
				};

				tbl[i] = new_tbl_entry;

			} else if (tbl[i].ext_entry == 1) {

				/*
				 * If tbl entry is valid and extended calculate the index
				 * into next tbl8 and expand the rule across the data structure.
				 */
				tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
						RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
				expand_rule(lpm, tbl8_gindex, depth, next_hop);
			}
		}

		return 0;
	}
	/*
	 * If this is not the last step just fill one position
	 * and calculate the index to the next table.
	 */
	else {
		/* If it's invalid a new tbl8 is needed */
		if (!tbl[tbl_index].valid) {
			if (lpm->next_tbl8 < lpm->number_tbl8s)
				tbl8_gindex = (lpm->next_tbl8)++;
			else
				return -ENOSPC;

			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[tbl_index] = new_tbl_entry;
		}
		/*
		 * If it's valid but not extended the rule that was stored
		 * here needs to be moved to the next table.
		 */
		else if (tbl[tbl_index].ext_entry == 0) {
			/* Search for free tbl8 group. */
			if (lpm->next_tbl8 < lpm->number_tbl8s)
				tbl8_gindex = (lpm->next_tbl8)++;
			else
				return -ENOSPC;

			tbl8_group_start = tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_group_start +
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

			/* Populate new tbl8 with tbl value. */
			for (i = tbl8_group_start; i < tbl8_group_end; i++) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = tbl[tbl_index].depth;
				lpm->tbl8[i].next_hop = tbl[tbl_index].next_hop;
				lpm->tbl8[i].ext_entry = 0;
			}

			/*
			 * Update tbl entry to point to new tbl8 entry. Note: The
			 * ext_flag and tbl8_index need to be updated simultaneously,
			 * so assign whole structure in one go.
			 */
			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[tbl_index] = new_tbl_entry;
		}

		*tbl_next = &(lpm->tbl8[tbl[tbl_index].lpm6_tbl8_gindex *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
	}

	return 1;
}
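
/*
 * Index arithmetic in add_step(), concretely: on the first step
 * (bytes = 3, first_byte = 1) the loop computes
 * tbl_index = ip[0] << 16 | ip[1] << 8 | ip[2], the tbl24 slot for the
 * first 24 bits; every later step inspects a single byte, so tbl_index
 * is simply that byte's value within the current tbl8 group.
 */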
/*
 * Add a route
 */
int
rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint8_t next_hop)
{
	return rte_lpm6_add_v1705(lpm, ip, depth, next_hop);
}
VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0);
int
rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t next_hop)
{
	struct rte_lpm6_tbl_entry *tbl;
	struct rte_lpm6_tbl_entry *tbl_next;
	int32_t rule_index;
	int status;
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	int i;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	mask_ip(masked_ip, depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, masked_ip, next_hop, depth);

	/* If there is no space available for new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	/* Inspect the first three bytes through tbl24 on the first step. */
	tbl = lpm->tbl24;
	status = add_step(lpm, tbl, &tbl_next, masked_ip, ADD_FIRST_BYTE, 1,
			depth, next_hop);
	if (status < 0) {
		rte_lpm6_delete(lpm, masked_ip, depth);

		return status;
	}

	/*
	 * Inspect one by one the rest of the bytes until
	 * the process is completed.
	 */
	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
		tbl = tbl_next;
		status = add_step(lpm, tbl, &tbl_next, masked_ip, 1,
				(uint8_t)(i+1), depth, next_hop);
		if (status < 0) {
			rte_lpm6_delete(lpm, masked_ip, depth);

			return status;
		}
	}

	return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip,
		uint8_t depth, uint32_t next_hop), rte_lpm6_add_v1705);
/*
 * Takes a pointer to a table entry and inspects one level.
 * The function returns 0 on lookup success, -ENOENT if no match was found
 * or 1 if the process needs to be continued by calling the function again.
 */
static inline int
lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
		const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
		uint8_t first_byte, uint32_t *next_hop)
{
	uint32_t tbl8_index, tbl_entry;

	/* Take the integer value from the pointer. */
	tbl_entry = *(const uint32_t *)tbl;

	/* If it is valid and extended we calculate the new pointer to return. */
	if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {

		tbl8_index = ip[first_byte-1] +
				((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);

		*tbl_next = &lpm->tbl8[tbl8_index];

		return 1;
	} else {
		/* If not extended then we can have a match. */
		*next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK);
		return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
	}
}
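
/*
 * Walk example: a lookup of 2001:db8::1 starts at tbl24[0x20010d]
 * (the first three bytes). If that entry is valid and extended, the
 * next call indexes the referenced tbl8 group with ip[3] (0xb8), and
 * so on one byte per step until an entry without ext_entry ends the
 * walk with either a next hop or -ENOENT.
 */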
/*
 * Looks up an IP
 */
int
rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
{
	uint32_t next_hop32 = 0;
	int32_t status;

	/* DEBUG: Check user input arguments. */
	if (next_hop == NULL)
		return -EINVAL;

	status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32);
	if (status == 0)
		*next_hop = (uint8_t)next_hop32;

	return status;
}
VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0);
int
rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,
		uint32_t *next_hop)
{
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	int status;
	uint8_t first_byte;
	uint32_t tbl24_index;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) {
		return -EINVAL;
	}

	first_byte = LOOKUP_FIRST_BYTE;
	tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];

	/* Calculate pointer to the first entry to be inspected */
	tbl = &lpm->tbl24[tbl24_index];

	do {
		/* Continue inspecting following levels until success or failure */
		status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop);
		tbl = tbl_next;
	} while (status == 1);

	return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,
		uint32_t *next_hop), rte_lpm6_lookup_v1705);
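
/*
 * Minimal usage sketch for the API above; illustrative only, not part
 * of the upstream file. The table name, sizes and next-hop value are
 * arbitrary and error handling is reduced to early returns.
 */
static inline void
lpm6_usage_example(void)
{
	struct rte_lpm6_config cfg = {
		.max_rules = 1024,
		.number_tbl8s = 1 << 16,
		.flags = 0,
	};
	/* 2001:db8::/32; the remaining bytes are zero-initialized. */
	uint8_t dst[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8 };
	struct rte_lpm6 *lpm;
	uint32_t nh = 0;

	lpm = rte_lpm6_create("example_lpm6", SOCKET_ID_ANY, &cfg);
	if (lpm == NULL)
		return;

	/* Route 2001:db8::/32 to next hop 5, then look the prefix up. */
	if (rte_lpm6_add(lpm, dst, 32, 5) == 0 &&
			rte_lpm6_lookup(lpm, dst, &nh) == 0)
		RTE_LOG(DEBUG, LPM, "next hop is %u\n", nh);

	rte_lpm6_free(lpm);
}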
/*
 * Looks up a group of IP addresses
 */
int
rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int16_t *next_hops, unsigned int n)
{
	unsigned int i;
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	uint32_t tbl24_index, next_hop;
	uint8_t first_byte;
	int status;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) {
		return -EINVAL;
	}

	for (i = 0; i < n; i++) {
		first_byte = LOOKUP_FIRST_BYTE;
		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
				(ips[i][1] << BYTE_SIZE) | ips[i][2];

		/* Calculate pointer to the first entry to be inspected */
		tbl = &lpm->tbl24[tbl24_index];

		do {
			/* Continue inspecting following levels until success or failure */
			status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++,
					&next_hop);
			tbl = tbl_next;
		} while (status == 1);

		if (status < 0)
			next_hops[i] = -1;
		else
			next_hops[i] = (int16_t)next_hop;
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0);
int
rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int32_t *next_hops, unsigned int n)
{
	unsigned int i;
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	uint32_t tbl24_index, next_hop;
	uint8_t first_byte;
	int status;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		first_byte = LOOKUP_FIRST_BYTE;
		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
				(ips[i][1] << BYTE_SIZE) | ips[i][2];

		/* Calculate pointer to the first entry to be inspected */
		tbl = &lpm->tbl24[tbl24_index];

		do {
			/* Continue inspecting following levels
			 * until success or failure
			 */
			status = lookup_step(lpm, tbl, &tbl_next, ips[i],
					first_byte++, &next_hop);
			tbl = tbl_next;
		} while (status == 1);

		if (status < 0)
			next_hops[i] = -1;
		else
			next_hops[i] = (int32_t)next_hop;
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int32_t *next_hops, unsigned int n),
		rte_lpm6_lookup_bulk_func_v1705);
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 */
static inline int32_t
rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
	uint32_t rule_index;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
		/* If rule is found return the rule index. */
		if ((memcmp(lpm->rules_tbl[rule_index].ip, ip,
				RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
				lpm->rules_tbl[rule_index].depth == depth) {
			return rule_index;
		}
	}

	/* If rule is not found return -ENOENT. */
	return -ENOENT;
}
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t next_hop32 = 0;
	int32_t status;

	/* DEBUG: Check user input arguments. */
	if (next_hop == NULL)
		return -EINVAL;

	status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32);
	if (status > 0)
		*next_hop = (uint8_t)next_hop32;

	return status;
}
VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0);
int
rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
			(depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	mask_ip(ip_masked, depth);

	/* Look for the rule using rule_find. */
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm,
		uint8_t *ip, uint8_t depth, uint32_t *next_hop),
		rte_lpm6_is_rule_present_v1705);
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 */
static inline void
rule_delete(struct rte_lpm6 *lpm, int32_t rule_index)
{
	/*
	 * Overwrite redundant rule with last rule in group and decrement rule
	 * counter.
	 */
	lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->used_rules-1];
	lpm->used_rules--;
}
/*
 * Deletes a rule
 */
int
rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
	int32_t rule_to_delete_index;
	uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
	unsigned int i;

	/*
	 * Check input arguments.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) {
		return -EINVAL;
	}

	/* Copy the IP and mask it to avoid modifying user's input data. */
	memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	mask_ip(ip_masked, depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -ENOENT.
	 */
	if (rule_to_delete_index < 0)
		return rule_to_delete_index;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index);

	/*
	 * Set all the table entries to 0 (i.e. delete every rule
	 * from the data structure).
	 */
	lpm->next_tbl8 = 0;
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/*
	 * Add every rule again (except for the one that was removed from
	 * the rules table).
	 */
	for (i = 0; i < lpm->used_rules; i++) {
		rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
				lpm->rules_tbl[i].next_hop);
	}

	return 0;
}
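
/*
 * Note on cost: deletion rebuilds tbl24 and every tbl8 from the
 * surviving rule list, so a single rte_lpm6_delete() costs roughly as
 * much as re-adding all remaining rules.
 */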
/*
 * Deletes a group of rules
 */
int
rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths,
		unsigned int n)
{
	int32_t rule_to_delete_index;
	uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
	unsigned int i;

	/*
	 * Check input arguments.
	 */
	if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) {
		return -EINVAL;
	}

	for (i = 0; i < n; i++) {
		/* Copy the IP and mask it to avoid modifying user's input data. */
		memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE);
		mask_ip(ip_masked, depths[i]);

		/*
		 * Find the index of the input rule, that needs to be deleted,
		 * in the rule table.
		 */
		rule_to_delete_index = rule_find(lpm, ip_masked, depths[i]);

		/*
		 * Check if rule_to_delete_index was found. If no rule was found
		 * the function rule_find returns -ENOENT; skip to the next rule.
		 */
		if (rule_to_delete_index < 0)
			continue;

		/* Delete the rule from the rule table. */
		rule_delete(lpm, rule_to_delete_index);
	}

	/*
	 * Set all the table entries to 0 (i.e. delete every rule
	 * from the data structure).
	 */
	lpm->next_tbl8 = 0;
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/*
	 * Add every rule again (except for the ones that were removed from
	 * the rules table).
	 */
	for (i = 0; i < lpm->used_rules; i++) {
		rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
				lpm->rules_tbl[i].next_hop);
	}

	return 0;
}
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm6_delete_all(struct rte_lpm6 *lpm)
{
	/* Zero used rules counter. */
	lpm->used_rules = 0;

	/* Zero next tbl8 index. */
	lpm->next_tbl8 = 0;

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(struct rte_lpm6_rule) * lpm->max_rules);
}