X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm6.c;h=9cc7be77dd78446a7f998dc46ed6c96df3d448e3;hb=de85636da0d8cd2d08b80599f4398cd4113543a5;hp=2e0a9e54f8462fcbaa3862097ed50c9509a15bdf;hpb=1c1d4d7a923d4804f1926fc5264f9ecdd8977b04;p=dpdk.git diff --git a/lib/librte_lpm/rte_lpm6.c b/lib/librte_lpm/rte_lpm6.c index 2e0a9e54f8..9cc7be77dd 100644 --- a/lib/librte_lpm/rte_lpm6.c +++ b/lib/librte_lpm/rte_lpm6.c @@ -1,13 +1,13 @@ /*- * BSD LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright @@ -17,7 +17,7 @@ * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -45,7 +45,6 @@ #include #include #include -#include #include #include #include @@ -77,13 +76,18 @@ enum valid_flag { VALID }; -TAILQ_HEAD(rte_lpm6_list, rte_lpm6); +TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry); + +static struct rte_tailq_elem rte_lpm6_tailq = { + .name = "RTE_LPM6", +}; +EAL_REGISTER_TAILQ(rte_lpm6_tailq) /** Tbl entry structure. It is the same for both tbl24 and tbl8 */ struct rte_lpm6_tbl_entry { uint32_t next_hop: 21; /**< Next hop / next table to be checked. */ uint32_t depth :8; /**< Rule depth. */ - + /* Flags. */ uint32_t valid :1; /**< Validation flag. */ uint32_t valid_group :1; /**< Group validation flag. */ @@ -93,14 +97,12 @@ struct rte_lpm6_tbl_entry { /** Rules tbl entry structure. */ struct rte_lpm6_rule { uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */ - uint8_t next_hop; /**< Rule next hop. */ + uint32_t next_hop; /**< Rule next hop. */ uint8_t depth; /**< Rule depth. */ }; /** LPM6 structure. */ struct rte_lpm6 { - TAILQ_ENTRY(rte_lpm6) next; /**< Next in list. */ - /* LPM metadata. */ char name[RTE_LPM6_NAMESIZE]; /**< Name of the lpm. */ uint32_t max_rules; /**< Max number of rules. 
*/ @@ -126,7 +128,7 @@ mask_ip(uint8_t *ip, uint8_t depth) { int16_t part_depth, mask; int i; - + part_depth = depth; for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) { @@ -149,15 +151,11 @@ rte_lpm6_create(const char *name, int socket_id, { char mem_name[RTE_LPM6_NAMESIZE]; struct rte_lpm6 *lpm = NULL; + struct rte_tailq_entry *te; uint64_t mem_size, rules_size; struct rte_lpm6_list *lpm_list; - /* Check that we have an initialised tail queue */ - if ((lpm_list = - RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6, rte_lpm6_list)) == NULL) { - rte_errno = E_RTE_NO_TAILQ; - return NULL; - } + lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list); RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t)); @@ -169,7 +167,7 @@ rte_lpm6_create(const char *name, int socket_id, return NULL; } - rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); + snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); /* Determine the amount of memory to allocate. */ mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) * @@ -179,39 +177,55 @@ rte_lpm6_create(const char *name, int socket_id, rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); /* Guarantee there's no existing */ - TAILQ_FOREACH(lpm, lpm_list, next) { + TAILQ_FOREACH(te, lpm_list, next) { + lpm = (struct rte_lpm6 *) te->data; if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0) break; } - if (lpm != NULL) + lpm = NULL; + if (te != NULL) { + rte_errno = EEXIST; goto exit; + } + + /* allocate tailq entry */ + te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n"); + goto exit; + } /* Allocate memory to store the LPM data structures. */ lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size, - CACHE_LINE_SIZE, socket_id); - + RTE_CACHE_LINE_SIZE, socket_id); + if (lpm == NULL) { RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); + rte_free(te); goto exit; } lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL, - (size_t)rules_size, CACHE_LINE_SIZE, socket_id); - + (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id); + if (lpm->rules_tbl == NULL) { - RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); + RTE_LOG(ERR, LPM, "LPM rules_tbl allocation failed\n"); rte_free(lpm); + lpm = NULL; + rte_free(te); goto exit; } /* Save user arguments. 
*/ lpm->max_rules = config->max_rules; lpm->number_tbl8s = config->number_tbl8s; - rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name); + snprintf(lpm->name, sizeof(lpm->name), "%s", name); - TAILQ_INSERT_TAIL(lpm_list, lpm, next); + te->data = (void *) lpm; -exit: + TAILQ_INSERT_TAIL(lpm_list, te, next); + +exit: rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); return lpm; @@ -223,25 +237,24 @@ exit: struct rte_lpm6 * rte_lpm6_find_existing(const char *name) { - struct rte_lpm6 *l; + struct rte_lpm6 *l = NULL; + struct rte_tailq_entry *te; struct rte_lpm6_list *lpm_list; - /* Check that we have an initialised tail queue */ - if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6, - rte_lpm6_list)) == NULL) { - rte_errno = E_RTE_NO_TAILQ; - return NULL; - } + lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list); rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); - TAILQ_FOREACH(l, lpm_list, next) { + TAILQ_FOREACH(te, lpm_list, next) { + l = (struct rte_lpm6 *) te->data; if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0) break; } rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); - if (l == NULL) + if (te == NULL) { rte_errno = ENOENT; + return NULL; + } return l; } @@ -252,13 +265,31 @@ rte_lpm6_find_existing(const char *name) void rte_lpm6_free(struct rte_lpm6 *lpm) { + struct rte_lpm6_list *lpm_list; + struct rte_tailq_entry *te; + /* Check user arguments. */ if (lpm == NULL) return; - RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_LPM6, rte_lpm6_list, lpm); + lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list); + + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + + /* find our tailq entry */ + TAILQ_FOREACH(te, lpm_list, next) { + if (te->data == (void *) lpm) + break; + } + + if (te != NULL) + TAILQ_REMOVE(lpm_list, te, next); + + rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_free(lpm->rules_tbl); rte_free(lpm); + rte_free(te); } /* @@ -266,10 +297,10 @@ rte_lpm6_free(struct rte_lpm6 *lpm) * the nexthop if so. Otherwise it adds a new rule if enough space is available. */ static inline int32_t -rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth) +rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint32_t next_hop, uint8_t depth) { uint32_t rule_index; - + /* Scan through rule list to see if rule already exists. */ for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) { @@ -298,7 +329,7 @@ rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth) /* Increment the used rules counter for this rule group. */ lpm->used_rules++; - + return rule_index; } @@ -309,7 +340,7 @@ rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth) */ static void expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth, - uint8_t next_hop) + uint32_t next_hop) { uint32_t tbl8_group_end, tbl8_gindex_next, j; @@ -346,13 +377,13 @@ expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth, static inline int add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes, - uint8_t first_byte, uint8_t depth, uint8_t next_hop) + uint8_t first_byte, uint8_t depth, uint32_t next_hop) { uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i; int32_t tbl8_gindex; int8_t bitshift; uint8_t bits_covered; - + /* * Calculate index to the table based on the number and position * of the bytes being inspected in this step. 
@@ -360,16 +391,16 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, tbl_index = 0; for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) { bitshift = (int8_t)((bytes - i)*BYTE_SIZE); - + if (bitshift < 0) bitshift = 0; tbl_index = tbl_index | ip[i-1] << bitshift; } /* Number of bits covered in this step */ bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE); - + /* - * If depth if smaller than this number (ie this is the last step) + * If depth if smaller than this number (ie this is the last step) * expand the rule across the relevant positions in the table. */ if (depth <= bits_covered) { @@ -390,17 +421,17 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, tbl[i] = new_tbl_entry; } else if (tbl[i].ext_entry == 1) { - + /* * If tbl entry is valid and extended calculate the index * into next tbl8 and expand the rule across the data structure. */ - tbl8_gindex = tbl[i].lpm6_tbl8_gindex * + tbl8_gindex = tbl[i].lpm6_tbl8_gindex * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES; expand_rule(lpm, tbl8_gindex, depth, next_hop); } - } - + } + return 0; } /* @@ -414,7 +445,7 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, tbl8_gindex = (lpm->next_tbl8)++; else return -ENOSPC; - + struct rte_lpm6_tbl_entry new_tbl_entry = { .lpm6_tbl8_gindex = tbl8_gindex, .depth = 0, @@ -464,11 +495,11 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, tbl[tbl_index] = new_tbl_entry; } - + *tbl_next = &(lpm->tbl8[tbl[tbl_index].lpm6_tbl8_gindex * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]); } - + return 1; } @@ -476,8 +507,16 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, * Add a route */ int -rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, +rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint8_t next_hop) +{ + return rte_lpm6_add_v1705(lpm, ip, depth, next_hop); +} +VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0); + +int +rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, + uint32_t next_hop) { struct rte_lpm6_tbl_entry *tbl; struct rte_lpm6_tbl_entry *tbl_next; @@ -485,11 +524,11 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, int status; uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE]; int i; - + /* Check user arguments. */ if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) return -EINVAL; - + /* Copy the IP and mask it to avoid modifying user's input data. */ memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE); mask_ip(masked_ip, depth); @@ -508,11 +547,11 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, depth, next_hop); if (status < 0) { rte_lpm6_delete(lpm, masked_ip, depth); - + return status; } - /* + /* * Inspect one by one the rest of the bytes until * the process is completed. */ @@ -522,13 +561,17 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, depth, next_hop); if (status < 0) { rte_lpm6_delete(lpm, masked_ip, depth); - + return status; } } - + return status; } +BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05); +MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, + uint8_t depth, uint32_t next_hop), + rte_lpm6_add_v1705); /* * Takes a pointer to a table entry and inspect one level. 
@@ -538,13 +581,13 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, static inline int lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl, const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, - uint8_t first_byte, uint8_t *next_hop) + uint8_t first_byte, uint32_t *next_hop) { uint32_t tbl8_index, tbl_entry; - + /* Take the integer value from the pointer. */ tbl_entry = *(const uint32_t *)tbl; - + /* If it is valid and extended we calculate the new pointer to return. */ if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) == RTE_LPM6_VALID_EXT_ENTRY_BITMASK) { @@ -558,7 +601,7 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl, return 1; } else { /* If not extended then we can have a match. */ - *next_hop = (uint8_t)tbl_entry; + *next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK); return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT; } } @@ -567,47 +610,69 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl, * Looks up an IP */ int -rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop) +rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop) +{ + uint32_t next_hop32 = 0; + int32_t status; + + /* DEBUG: Check user input arguments. */ + if (next_hop == NULL) + return -EINVAL; + + status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32); + if (status == 0) + *next_hop = (uint8_t)next_hop32; + + return status; +} +VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0); + +int +rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip, + uint32_t *next_hop) { const struct rte_lpm6_tbl_entry *tbl; - const struct rte_lpm6_tbl_entry *tbl_next; + const struct rte_lpm6_tbl_entry *tbl_next = NULL; int status; uint8_t first_byte; uint32_t tbl24_index; - + /* DEBUG: Check user input arguments. */ if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) { return -EINVAL; } - + first_byte = LOOKUP_FIRST_BYTE; tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2]; /* Calculate pointer to the first entry to be inspected */ tbl = &lpm->tbl24[tbl24_index]; - + do { - /* Continue inspecting following levels until success or failure */ + /* Continue inspecting following levels until success or failure */ status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop); tbl = tbl_next; } while (status == 1); - + return status; } +BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05); +MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, + uint32_t *next_hop), rte_lpm6_lookup_v1705); /* * Looks up a group of IP addresses */ int -rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm, +rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm, uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], int16_t * next_hops, unsigned n) { unsigned i; const struct rte_lpm6_tbl_entry *tbl; - const struct rte_lpm6_tbl_entry *tbl_next; - uint32_t tbl24_index; - uint8_t first_byte, next_hop; + const struct rte_lpm6_tbl_entry *tbl_next = NULL; + uint32_t tbl24_index, next_hop; + uint8_t first_byte; int status; /* DEBUG: Check user input arguments. 
*/ @@ -622,22 +687,70 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm, /* Calculate pointer to the first entry to be inspected */ tbl = &lpm->tbl24[tbl24_index]; - + do { - /* Continue inspecting following levels until success or failure */ + /* Continue inspecting following levels until success or failure */ status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++, &next_hop); tbl = tbl_next; } while (status == 1); - + if (status < 0) next_hops[i] = -1; else - next_hops[i] = next_hop; + next_hops[i] = (int16_t)next_hop; } - + return 0; } +VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0); + +int +rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm, + uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], + int32_t *next_hops, unsigned int n) +{ + unsigned int i; + const struct rte_lpm6_tbl_entry *tbl; + const struct rte_lpm6_tbl_entry *tbl_next = NULL; + uint32_t tbl24_index, next_hop; + uint8_t first_byte; + int status; + + /* DEBUG: Check user input arguments. */ + if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) + return -EINVAL; + + for (i = 0; i < n; i++) { + first_byte = LOOKUP_FIRST_BYTE; + tbl24_index = (ips[i][0] << BYTES2_SIZE) | + (ips[i][1] << BYTE_SIZE) | ips[i][2]; + + /* Calculate pointer to the first entry to be inspected */ + tbl = &lpm->tbl24[tbl24_index]; + + do { + /* Continue inspecting following levels + * until success or failure + */ + status = lookup_step(lpm, tbl, &tbl_next, ips[i], + first_byte++, &next_hop); + tbl = tbl_next; + } while (status == 1); + + if (status < 0) + next_hops[i] = -1; + else + next_hops[i] = (int32_t)next_hop; + } + + return 0; +} +BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05); +MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm, + uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], + int32_t *next_hops, unsigned int n), + rte_lpm6_lookup_bulk_func_v1705); /* * Finds a rule in rule table. @@ -647,14 +760,14 @@ static inline int32_t rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth) { uint32_t rule_index; - + /* Scan used rules at given depth to find rule. */ for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) { /* If rule is found return the rule index. */ if ((memcmp (lpm->rules_tbl[rule_index].ip, ip, RTE_LPM6_IPV6_ADDR_SIZE) == 0) && lpm->rules_tbl[rule_index].depth == depth) { - + return rule_index; } } @@ -663,6 +776,61 @@ rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth) return -ENOENT; } +/* + * Look for a rule in the high-level rules table + */ +int +rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, + uint8_t *next_hop) +{ + uint32_t next_hop32 = 0; + int32_t status; + + /* DEBUG: Check user input arguments. */ + if (next_hop == NULL) + return -EINVAL; + + status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32); + if (status > 0) + *next_hop = (uint8_t)next_hop32; + + return status; + +} +VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0); + +int +rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, + uint32_t *next_hop) +{ + uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE]; + int32_t rule_index; + + /* Check user arguments. */ + if ((lpm == NULL) || next_hop == NULL || ip == NULL || + (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) + return -EINVAL; + + /* Copy the IP and mask it to avoid modifying user's input data. */ + memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE); + mask_ip(ip_masked, depth); + + /* Look for the rule using rule_find. 
*/ + rule_index = rule_find(lpm, ip_masked, depth); + + if (rule_index >= 0) { + *next_hop = lpm->rules_tbl[rule_index].next_hop; + return 1; + } + + /* If rule is not found return 0. */ + return 0; +} +BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05); +MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, + uint8_t *ip, uint8_t depth, uint32_t *next_hop), + rte_lpm6_is_rule_present_v1705); + /* * Delete a rule from the rule table. * NOTE: Valid range for depth parameter is 1 .. 128 inclusive. @@ -687,7 +855,7 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth) int32_t rule_to_delete_index; uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE]; unsigned i; - + /* * Check input arguments. */ @@ -714,8 +882,8 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth) /* Delete the rule from the rule table. */ rule_delete(lpm, rule_to_delete_index); - - /* + + /* * Set all the table entries to 0 (ie delete every rule * from the data structure. */ @@ -723,8 +891,8 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth) memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s); - - /* + + /* * Add every rule again (except for the one that was removed from * the rules table). */ @@ -732,7 +900,7 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth) rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth, lpm->rules_tbl[i].next_hop); } - + return 0; } @@ -746,14 +914,14 @@ rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm, int32_t rule_to_delete_index; uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE]; unsigned i; - + /* * Check input arguments. */ if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) { return -EINVAL; } - + for (i = 0; i < n; i++) { /* Copy the IP and mask it to avoid modifying user's input data. */ memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE); @@ -775,8 +943,8 @@ rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm, /* Delete the rule from the rule table. */ rule_delete(lpm, rule_to_delete_index); } - - /* + + /* * Set all the table entries to 0 (ie delete every rule * from the data structure. */ @@ -784,8 +952,8 @@ rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm, memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s); - - /* + + /* * Add every rule again (except for the ones that were removed from * the rules table). */ @@ -793,7 +961,7 @@ rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm, rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth, lpm->rules_tbl[i].next_hop); } - + return 0; } @@ -805,7 +973,7 @@ rte_lpm6_delete_all(struct rte_lpm6 *lpm) { /* Zero used rules counter. */ lpm->used_rules = 0; - + /* Zero next tbl8 index. */ lpm->next_tbl8 = 0;
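
---

The functional core of this patch widens the LPM6 next hop from uint8_t to
uint32_t through the rule table, the add/lookup paths and the bulk lookup,
while the _v20 wrappers keep the old 8-bit ABI alive. A minimal caller of
the resulting 17.05 API could look like the sketch below; the table sizes,
the 2001:db8::/32 route and the 0x123456 next hop are arbitrary examples,
and since the tbl entry still carries only a 21-bit next_hop field, values
must stay below 2^21:

#include <stdio.h>
#include <stdint.h>
#include <rte_eal.h>
#include <rte_memory.h>
#include <rte_lpm6.h>

int main(int argc, char **argv)
{
	struct rte_lpm6_config config = {
		.max_rules = 1024,
		.number_tbl8s = 1 << 16,
		.flags = 0,
	};
	/* 2001:db8::/32, zero-padded to the full 16 bytes. */
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8 };
	uint32_t next_hop = 0;
	struct rte_lpm6 *lpm;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	lpm = rte_lpm6_create("example_lpm6", SOCKET_ID_ANY, &config);
	if (lpm == NULL)
		return -1;

	/* 0x123456 does not fit in the old uint8_t next hop, but is
	 * fine for the 21-bit field behind the new uint32_t API. */
	if (rte_lpm6_add(lpm, ip, 32, 0x123456) == 0 &&
			rte_lpm6_lookup(lpm, ip, &next_hop) == 0)
		printf("next hop: 0x%x\n", next_hop);

	rte_lpm6_free(lpm);
	return 0;
}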
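The patch also moves the LPM6 instance list from the statically indexed
tailq (RTE_TAILQ_LOOKUP_BY_IDX with a fixed RTE_TAILQ_LPM6 slot) to a
dynamically registered one. Reduced to its essentials for a hypothetical
object type (every my_* name is invented for illustration; only the
rte_tailq machinery is real), the pattern is:

#include <string.h>
#include <sys/queue.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_rwlock.h>

struct my_obj {
	char name[32];
};

/* The list links generic rte_tailq_entry nodes; each node's data
 * pointer refers to the actual object. */
TAILQ_HEAD(my_obj_list, rte_tailq_entry);

static struct rte_tailq_elem my_obj_tailq = {
	.name = "MY_OBJ",
};
EAL_REGISTER_TAILQ(my_obj_tailq)

struct my_obj *
my_obj_find(const char *name)
{
	struct my_obj_list *list =
		RTE_TAILQ_CAST(my_obj_tailq.head, my_obj_list);
	struct rte_tailq_entry *te;
	struct my_obj *obj = NULL;

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, list, next) {
		obj = (struct my_obj *) te->data;
		if (strncmp(name, obj->name, sizeof(obj->name)) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	return (te == NULL) ? NULL : obj;
}

Registration happens from a constructor when the library is mapped, so
the list head is found by name in the shared memory configuration rather
than through a compile-time index.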
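Finally, the VERSION_SYMBOL / BIND_DEFAULT_SYMBOL / MAP_STATIC_SYMBOL trio
exports both ABIs from a single object: binaries linked against 2.0 bind
to the _v20 wrapper, new builds bind to the _v1705 implementation, and
static builds alias the plain name directly. Stripped of the LPM
specifics (my_func and its signatures are hypothetical, and a real
shared-library build also needs DPDK_2.0 and DPDK_17.05 nodes in the
library's version map), the shape is roughly:

#include <stdint.h>
#include <rte_compat.h>

uint32_t my_func_v1705(int x);
uint8_t my_func_v20(int x);

/* Callers built against the 2.0 ABI keep the narrow return type and
 * go through a truncating wrapper... */
uint8_t
my_func_v20(int x)
{
	return (uint8_t)my_func_v1705(x);
}
VERSION_SYMBOL(my_func, _v20, 2.0);

/* ...while newly compiled code binds to the wide variant by default. */
uint32_t
my_func_v1705(int x)
{
	return (uint32_t)x;
}
BIND_DEFAULT_SYMBOL(my_func, _v1705, 17.05);
MAP_STATIC_SYMBOL(uint32_t my_func(int x), my_func_v1705);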