/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm6.h"

#define RTE_LPM6_TBL24_NUM_ENTRIES        (1 << 24)
#define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES         256
#define RTE_LPM6_TBL8_MAX_NUM_GROUPS      (1 << 21)

#define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000
#define RTE_LPM6_LOOKUP_SUCCESS          0x20000000
#define RTE_LPM6_TBL8_BITMASK            0x001FFFFF
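
/*
 * Note: these three masks mirror the packed layout of struct
 * rte_lpm6_tbl_entry when an entry is read as a single uint32_t
 * (assuming the little-endian bit-field ordering of the supported
 * targets): bits 0-20 hold next_hop (reused as the tbl8 group index
 * for extended entries, see lpm6_tbl8_gindex below), bits 21-28 the
 * depth, bit 29 valid, bit 30 valid_group and bit 31 ext_entry, so
 * RTE_LPM6_VALID_EXT_ENTRY_BITMASK tests valid and ext_entry at once.
 */
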
#define ADD_FIRST_BYTE                            3
#define LOOKUP_FIRST_BYTE                         4
#define BYTE_SIZE                                 8
#define BYTES2_SIZE                              16

#define lpm6_tbl8_gindex next_hop

/** Flags for setting an entry as valid/invalid. */
#define VALID 1
#define INVALID 0

TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm6_tailq = {
	.name = "RTE_LPM6",
};
EAL_REGISTER_TAILQ(rte_lpm6_tailq)

/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
struct rte_lpm6_tbl_entry {
	uint32_t next_hop:	21;  /**< Next hop / next table to be checked. */
	uint32_t depth	:8;      /**< Rule depth. */

	/* Flags. */
	uint32_t valid     :1;   /**< Validation flag. */
	uint32_t valid_group :1; /**< Group validation flag. */
	uint32_t ext_entry :1;   /**< External entry. */
};

/** Rules tbl entry structure. */
struct rte_lpm6_rule {
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
	uint8_t depth; /**< Rule depth. */
};

/** LPM6 structure. */
struct rte_lpm6 {
	/* LPM metadata. */
	char name[RTE_LPM6_NAMESIZE];    /**< Name of the lpm. */
	uint32_t max_rules;              /**< Max number of rules. */
	uint32_t used_rules;             /**< Used rules so far. */
	uint32_t number_tbl8s;           /**< Number of tbl8s to allocate. */
	uint32_t next_tbl8;              /**< Next tbl8 to be used. */

	/* LPM Tables. */
	struct rte_lpm6_rule *rules_tbl; /**< LPM rules. */
	struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm6_tbl_entry tbl8[0]
			__rte_cache_aligned; /**< LPM tbl8 table. */
};

/*
 * Takes an array of uint8_t (IPv6 address) and masks it using the depth.
 * It keeps the depth most significant bits of the address untouched
 * and sets the rest to 0.
 */
static inline void
mask_ip(uint8_t *ip, uint8_t depth)
{
	int16_t part_depth, mask;
	int i;

	part_depth = depth;

	for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
		if (part_depth < BYTE_SIZE && part_depth >= 0) {
			mask = (uint16_t)(~(UINT8_MAX >> part_depth));
			ip[i] = (uint8_t)(ip[i] & mask);
		} else if (part_depth < 0) {
			ip[i] = 0;
		}
		part_depth -= BYTE_SIZE;
	}
}
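
/*
 * Worked example for mask_ip() (illustration only, not part of the
 * original file): with depth = 36 the first four bytes pass through
 * untouched, the fifth byte keeps its top four bits (mask 0xf0) and
 * all remaining bytes are zeroed.
 *
 *	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE] =
 *		{ 0x20, 0x01, 0x0d, 0xb8, 0xab, 0xcd };	// rest is zero
 *	mask_ip(ip, 36);
 *	// ip is now 20 01 0d b8 a0 00 00 ... 00
 */
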
/*
 * Allocates memory for LPM object
 */
struct rte_lpm6 *
rte_lpm6_create(const char *name, int socket_id,
		const struct rte_lpm6_config *config)
{
	char mem_name[RTE_LPM6_NAMESIZE];
	struct rte_lpm6 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint64_t mem_size, rules_size;
	struct rte_lpm6_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config == NULL) ||
			(config->max_rules == 0) ||
			config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
	rules_size = sizeof(struct rte_lpm6_rule) * config->max_rules;

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm6 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
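
/*
 * Usage sketch (illustration only; the name and sizes below are made up
 * for the example): creating a table on the caller's NUMA socket and
 * checking rte_errno on failure.
 *
 *	struct rte_lpm6_config cfg = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 1 << 16,
 *		.flags = 0,
 *	};
 *	struct rte_lpm6 *lpm = rte_lpm6_create("example_lpm6",
 *			rte_socket_id(), &cfg);
 *	if (lpm == NULL)
 *		rte_panic("LPM6 creation failed: %d\n", rte_errno);
 */
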
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm6 *
rte_lpm6_find_existing(const char *name)
{
	struct rte_lpm6 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm6_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm6 *) te->data;
		if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm6_free(struct rte_lpm6 *lpm)
{
	struct rte_lpm6_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}

	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}

/*
 * Checks if a rule already exists in the rules table and updates
 * the next hop if so. Otherwise it adds a new rule if enough space is
 * available.
 */
static inline int32_t
rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint32_t next_hop, uint8_t depth)
{
	uint32_t rule_index;

	/* Scan through rule list to see if rule already exists. */
	for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {

		/* If rule already exists update its next_hop and return. */
		if ((memcmp (lpm->rules_tbl[rule_index].ip, ip,
				RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
				lpm->rules_tbl[rule_index].depth == depth) {
			lpm->rules_tbl[rule_index].next_hop = next_hop;

			return rule_index;
		}
	}

	/*
	 * If rule does not exist check if there is space to add a new rule to
	 * this rule group. If there is no space return error.
	 */
	if (lpm->used_rules == lpm->max_rules) {
		return -ENOSPC;
	}

	/* If there is space for the new rule add it. */
	rte_memcpy(lpm->rules_tbl[rule_index].ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	lpm->rules_tbl[rule_index].next_hop = next_hop;
	lpm->rules_tbl[rule_index].depth = depth;

	/* Increment the used rules counter for this rule group. */
	lpm->used_rules++;

	return rule_index;
}

/*
 * Function that expands a rule across the data structure when a less-generic
 * one has been added before. It ensures that every possible combination of
 * bits in the IP address returns a match.
 */
static void
expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
		uint32_t next_hop)
{
	uint32_t tbl8_group_end, tbl8_gindex_next, j;

	tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

	struct rte_lpm6_tbl_entry new_tbl8_entry = {
		.valid = VALID,
		.valid_group = VALID,
		.depth = depth,
		.next_hop = next_hop,
		.ext_entry = 0,
	};

	for (j = tbl8_gindex; j < tbl8_group_end; j++) {
		if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0
				&& lpm->tbl8[j].depth <= depth)) {

			lpm->tbl8[j] = new_tbl8_entry;

		} else if (lpm->tbl8[j].ext_entry == 1) {

			tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex
					* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			expand_rule(lpm, tbl8_gindex_next, depth, next_hop);
		}
	}
}

/*
 * Partially adds a new route to the data structure (tbl24+tbl8s).
 * It returns 0 on success, a negative number on failure, or 1 if
 * the process needs to be continued by calling the function again.
 */
static inline int
add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
		struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes,
		uint8_t first_byte, uint8_t depth, uint32_t next_hop)
{
	uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i;
	int32_t tbl8_gindex;
	int8_t bitshift;
	uint8_t bits_covered;

	/*
	 * Calculate index to the table based on the number and position
	 * of the bytes being inspected in this step.
	 */
	tbl_index = 0;
	for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
		bitshift = (int8_t)((bytes - i)*BYTE_SIZE);

		if (bitshift < 0) bitshift = 0;
		tbl_index = tbl_index | ip[i-1] << bitshift;
	}
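
	/*
	 * Illustration: on the first step (bytes = ADD_FIRST_BYTE = 3,
	 * first_byte = 1) the loop above reduces to
	 * tbl_index = (ip[0] << 16) | (ip[1] << 8) | ip[2], i.e. the
	 * tbl24 index; on every later step (bytes = 1) it is simply
	 * ip[first_byte - 1], the offset within one tbl8 group.
	 */
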
	/* Number of bits covered in this step */
	bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);

	/*
	 * If depth is smaller than or equal to this number (ie this is the
	 * last step) expand the rule across the relevant positions in the
	 * table.
	 */
	if (depth <= bits_covered) {
		tbl_range = 1 << (bits_covered - depth);

		for (i = tbl_index; i < (tbl_index + tbl_range); i++) {
			if (!tbl[i].valid || (tbl[i].ext_entry == 0 &&
					tbl[i].depth <= depth)) {

				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = next_hop,
					.depth = depth,
					.valid = VALID,
					.valid_group = VALID,
					.ext_entry = 0,
				};

				tbl[i] = new_tbl_entry;

			} else if (tbl[i].ext_entry == 1) {

				/*
				 * If tbl entry is valid and extended calculate the index
				 * into next tbl8 and expand the rule across the data structure.
				 */
				tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
						RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
				expand_rule(lpm, tbl8_gindex, depth, next_hop);
			}
		}

		return 0;
	}
	/*
	 * If this is not the last step just fill one position
	 * and calculate the index to the next table.
	 */
	else {
		/* If it's invalid a new tbl8 is needed */
		if (!tbl[tbl_index].valid) {
			if (lpm->next_tbl8 < lpm->number_tbl8s)
				tbl8_gindex = (lpm->next_tbl8)++;
			else
				return -ENOSPC;

			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[tbl_index] = new_tbl_entry;
		}
		/*
		 * If it's valid but not extended the rule that was stored
		 * here needs to be moved to the next table.
		 */
		else if (tbl[tbl_index].ext_entry == 0) {
			/* Search for a free tbl8 group. */
			if (lpm->next_tbl8 < lpm->number_tbl8s)
				tbl8_gindex = (lpm->next_tbl8)++;
			else
				return -ENOSPC;

			tbl8_group_start = tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_group_start +
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

			/* Populate new tbl8 with tbl value. */
			for (i = tbl8_group_start; i < tbl8_group_end; i++) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = tbl[tbl_index].depth;
				lpm->tbl8[i].next_hop = tbl[tbl_index].next_hop;
				lpm->tbl8[i].ext_entry = 0;
			}

			/*
			 * Update tbl entry to point to new tbl8 entry. Note: The
			 * ext_flag and tbl8_index need to be updated simultaneously,
			 * so assign whole structure in one go.
			 */
			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[tbl_index] = new_tbl_entry;
		}

		*tbl_next = &(lpm->tbl8[tbl[tbl_index].lpm6_tbl8_gindex *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
	}

	return 1;
}

/*
 * Add a route
 */
int
rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint8_t next_hop)
{
	return rte_lpm6_add_v1705(lpm, ip, depth, next_hop);
}
VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0);

int
rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t next_hop)
{
	struct rte_lpm6_tbl_entry *tbl;
	struct rte_lpm6_tbl_entry *tbl_next;
	int32_t rule_index;
	int status;
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	int i;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	mask_ip(masked_ip, depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, masked_ip, next_hop, depth);

	/* If there is no space available for new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	/* Inspect the first three bytes through tbl24 on the first step. */
	tbl = lpm->tbl24;
	status = add_step (lpm, tbl, &tbl_next, masked_ip, ADD_FIRST_BYTE, 1,
			depth, next_hop);
	if (status < 0) {
		rte_lpm6_delete(lpm, masked_ip, depth);

		return status;
	}

	/*
	 * Inspect one by one the rest of the bytes until
	 * the process is completed.
	 */
	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
		tbl = tbl_next;
		status = add_step (lpm, tbl, &tbl_next, masked_ip, 1, (uint8_t)(i+1),
				depth, next_hop);
		if (status < 0) {
			rte_lpm6_delete(lpm, masked_ip, depth);

			return status;
		}
	}

	return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip,
				uint8_t depth, uint32_t next_hop),
		rte_lpm6_add_v1705);
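
/*
 * Usage sketch (illustration only; the prefix and next-hop ID are made
 * up): installing a /48 route. A negative return means failure, e.g.
 * -EINVAL for bad arguments or -ENOSPC when the rule table or tbl8
 * space is exhausted.
 *
 *	uint8_t prefix[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8,
 *		0x12, 0x34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 *
 *	if (rte_lpm6_add(lpm, prefix, 48, 5) < 0)
 *		RTE_LOG(ERR, LPM, "route add failed\n");
 */
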
/*
 * Takes a pointer to a table entry and inspects one level.
 * The function returns 0 on lookup success, -ENOENT if no match was found
 * or 1 if the process needs to be continued by calling the function again.
 */
static inline int
lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
		const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
		uint8_t first_byte, uint32_t *next_hop)
{
	uint32_t tbl8_index, tbl_entry;

	/* Take the integer value from the pointer. */
	tbl_entry = *(const uint32_t *)tbl;

	/* If it is valid and extended we calculate the new pointer to return. */
	if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {

		tbl8_index = ip[first_byte-1] +
				((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);

		*tbl_next = &lpm->tbl8[tbl8_index];

		return 1;
	} else {
		/* If not extended then we can have a match. */
		*next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK);
		return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
	}
}

/*
 * Looks up an IP
 */
int
rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
{
	uint32_t next_hop32 = 0;
	int32_t status;

	/* DEBUG: Check user input arguments. */
	if (next_hop == NULL)
		return -EINVAL;

	status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32);
	if (status == 0)
		*next_hop = (uint8_t)next_hop32;

	return status;
}
VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0);

int
rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,
		uint32_t *next_hop)
{
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	int status;
	uint8_t first_byte;
	uint32_t tbl24_index;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) {
		return -EINVAL;
	}

	first_byte = LOOKUP_FIRST_BYTE;
	tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];

	/* Calculate pointer to the first entry to be inspected */
	tbl = &lpm->tbl24[tbl24_index];

	do {
		/* Continue inspecting following levels until success or failure */
		status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop);
		tbl = tbl_next;
	} while (status == 1);

	return status;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,
		uint32_t *next_hop), rte_lpm6_lookup_v1705);
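
/*
 * Usage sketch (illustration only; the destination address is made up):
 * a single lookup. next_hop is only meaningful when the call returns 0;
 * -ENOENT means no matching route.
 *
 *	uint8_t dst[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8,
 *		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
 *	uint32_t next_hop;
 *
 *	if (rte_lpm6_lookup(lpm, dst, &next_hop) == 0)
 *		send_to_port(next_hop);	// hypothetical helper
 */
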
/*
 * Looks up a group of IP addresses
 */
int
rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int16_t * next_hops, unsigned n)
{
	unsigned i;
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	uint32_t tbl24_index, next_hop;
	uint8_t first_byte;
	int status;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) {
		return -EINVAL;
	}

	for (i = 0; i < n; i++) {
		first_byte = LOOKUP_FIRST_BYTE;
		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
				(ips[i][1] << BYTE_SIZE) | ips[i][2];

		/* Calculate pointer to the first entry to be inspected */
		tbl = &lpm->tbl24[tbl24_index];

		do {
			/* Continue inspecting following levels until success or failure */
			status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++,
					&next_hop);
			tbl = tbl_next;
		} while (status == 1);

		if (status < 0)
			next_hops[i] = -1;
		else
			next_hops[i] = (int16_t)next_hop;
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0);

int
rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int32_t *next_hops, unsigned int n)
{
	unsigned int i;
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	uint32_t tbl24_index, next_hop;
	uint8_t first_byte;
	int status;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		first_byte = LOOKUP_FIRST_BYTE;
		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
				(ips[i][1] << BYTE_SIZE) | ips[i][2];

		/* Calculate pointer to the first entry to be inspected */
		tbl = &lpm->tbl24[tbl24_index];

		do {
			/* Continue inspecting following levels
			 * until success or failure
			 */
			status = lookup_step(lpm, tbl, &tbl_next, ips[i],
					first_byte++, &next_hop);
			tbl = tbl_next;
		} while (status == 1);

		if (status < 0)
			next_hops[i] = -1;
		else
			next_hops[i] = (int32_t)next_hop;
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int32_t *next_hops, unsigned int n),
		rte_lpm6_lookup_bulk_func_v1705);
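
/*
 * Usage sketch (illustration only; BURST and the addresses are made up):
 * resolving a burst of destinations in one call. next_hops[i] is set
 * to -1 when no route matches ips[i].
 *
 *	uint8_t ips[BURST][RTE_LPM6_IPV6_ADDR_SIZE];
 *	int32_t next_hops[BURST];
 *
 *	// ... fill ips[] from received packets ...
 *	rte_lpm6_lookup_bulk_func(lpm, ips, next_hops, BURST);
 */
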
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 */
static inline int32_t
rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
	uint32_t rule_index;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
		/* If rule is found return the rule index. */
		if ((memcmp (lpm->rules_tbl[rule_index].ip, ip,
				RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
				lpm->rules_tbl[rule_index].depth == depth) {

			return rule_index;
		}
	}

	/* If rule is not found return -ENOENT. */
	return -ENOENT;
}

/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t next_hop32 = 0;
	int32_t status;

	/* DEBUG: Check user input arguments. */
	if (next_hop == NULL)
		return -EINVAL;

	status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32);
	if (status > 0)
		*next_hop = (uint8_t)next_hop32;

	return status;
}
VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0);

int
rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
			(depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	mask_ip(ip_masked, depth);

	/* Look for the rule using rule_find. */
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm,
		uint8_t *ip, uint8_t depth, uint32_t *next_hop),
		rte_lpm6_is_rule_present_v1705);
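
/*
 * Usage sketch (illustration only): unlike lookup, this checks the
 * rule table for an exact prefix/depth pair rather than performing a
 * longest-prefix match. Returns 1 if found, 0 if not, -EINVAL on bad
 * arguments.
 *
 *	uint32_t nh;
 *	if (rte_lpm6_is_rule_present(lpm, prefix, 48, &nh) == 1)
 *		printf("rule present, next hop %u\n", nh);
 */
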
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 */
static inline void
rule_delete(struct rte_lpm6 *lpm, int32_t rule_index)
{
	/*
	 * Overwrite redundant rule with last rule in group and decrement rule
	 * counter.
	 */
	lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->used_rules-1];
	lpm->used_rules--;
}

/*
 * Deletes a rule
 */
int
rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
	int32_t rule_to_delete_index;
	uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
	unsigned i;

	/*
	 * Check input arguments.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) {
		return -EINVAL;
	}

	/* Copy the IP and mask it to avoid modifying user's input data. */
	memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
	mask_ip(ip_masked, depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -ENOENT.
	 */
	if (rule_to_delete_index < 0)
		return rule_to_delete_index;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index);

	/*
	 * Set all the table entries to 0 (ie delete every rule
	 * from the data structure).
	 */
	lpm->next_tbl8 = 0;
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/*
	 * Add every rule again (except for the one that was removed from
	 * the rules table).
	 */
	for (i = 0; i < lpm->used_rules; i++) {
		rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
				lpm->rules_tbl[i].next_hop);
	}

	return 0;
}
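
/*
 * Usage sketch (illustration only): removing the /48 installed earlier.
 * Note that deletion rebuilds tbl24/tbl8 from all remaining rules, so
 * it is far more expensive than an add or a lookup.
 *
 *	if (rte_lpm6_delete(lpm, prefix, 48) < 0)
 *		RTE_LOG(ERR, LPM, "route delete failed\n");
 */
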
/*
 * Deletes a group of rules
 */
int
rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n)
{
	int32_t rule_to_delete_index;
	uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
	unsigned i;

	/*
	 * Check input arguments.
	 */
	if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) {
		return -EINVAL;
	}

	for (i = 0; i < n; i++) {
		/* Copy the IP and mask it to avoid modifying user's input data. */
		memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE);
		mask_ip(ip_masked, depths[i]);

		/*
		 * Find the index of the input rule, that needs to be deleted, in the
		 * rule table.
		 */
		rule_to_delete_index = rule_find(lpm, ip_masked, depths[i]);

		/*
		 * Check if rule_to_delete_index was found. If no rule was found the
		 * function rule_find returns -ENOENT.
		 */
		if (rule_to_delete_index < 0)
			continue;

		/* Delete the rule from the rule table. */
		rule_delete(lpm, rule_to_delete_index);
	}

	/*
	 * Set all the table entries to 0 (ie delete every rule
	 * from the data structure).
	 */
	lpm->next_tbl8 = 0;
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/*
	 * Add every rule again (except for the ones that were removed from
	 * the rules table).
	 */
	for (i = 0; i < lpm->used_rules; i++) {
		rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
				lpm->rules_tbl[i].next_hop);
	}

	return 0;
}

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm6_delete_all(struct rte_lpm6 *lpm)
{
	/* Zero used rules counter. */
	lpm->used_rules = 0;

	/* Zero next tbl8 index. */
	lpm->next_tbl8 = 0;

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(struct rte_lpm6_rule) * lpm->max_rules);
}