X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.h;h=03da2d37e0f767cdcae5936cf3276e2be2af1e7e;hb=0f392d91b9977fdb879ef595839806c29b150c14;hp=033f5423efd353fdea327a01ee1559058fb5e07b;hpb=e9d48c0072d36eb6423b45fba4ec49d0def6c36f;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index 033f5423ef..03da2d37e0 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
  */
 
 #ifndef _RTE_LPM_H_
@@ -44,7 +16,12 @@
 #include
 #include
 #include
+#include
+#include
 #include
+#include
+#include
+#include
 
 #ifdef __cplusplus
 extern "C" {
@@ -53,16 +30,6 @@ extern "C" {
 /** Max number of characters in LPM name. */
 #define RTE_LPM_NAMESIZE 32
 
-/** @deprecated Possible location to allocate memory. This was for last
- * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
- * allocated in memory using librte_malloc which uses a memzone. */
-#define RTE_LPM_HEAP 0
-
-/** @deprecated Possible location to allocate memory. This was for last
- * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
- * allocated in memory using librte_malloc which uses a memzone. */
-#define RTE_LPM_MEMZONE 1
-
 /** Maximum depth value possible for IPv4 LPM. */
 #define RTE_LPM_MAX_DEPTH 32
 
@@ -72,6 +39,9 @@ extern "C" {
 /** @internal Number of entries in a tbl8 group. */
 #define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
 
+/** @internal Max number of tbl8 groups in the tbl8. */
+#define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)
+
 /** @internal Total number of tbl8 groups in the tbl8. */
 #define RTE_LPM_TBL8_NUM_GROUPS 256
@@ -88,38 +58,70 @@ extern "C" {
 #define RTE_LPM_RETURN_IF_TRUE(cond, retval)
 #endif
 
-/** @internal bitmask with valid and ext_entry/valid_group fields set */
-#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
+/** @internal bitmask with valid and valid_group fields set */
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
 
 /** Bitmask used to indicate successful lookup */
-#define RTE_LPM_LOOKUP_SUCCESS 0x0100
+#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
+
+/** @internal Default RCU defer queue entries to reclaim in one go. */
+#define RTE_LPM_RCU_DQ_RECLAIM_MAX 16
+
+/** RCU reclamation modes */
+enum rte_lpm_qsbr_mode {
+	/** Create defer queue for reclaim. */
+	RTE_LPM_QSBR_MODE_DQ = 0,
+	/** Use blocking mode reclaim. No defer queue created. */
+	RTE_LPM_QSBR_MODE_SYNC
+};
 
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 /** @internal Tbl24 entry structure. */
-struct rte_lpm_tbl24_entry {
-	/* Stores Next hop or group index (i.e. gindex)into tbl8. */
-	union {
-		uint8_t next_hop;
-		uint8_t tbl8_gindex;
-	};
+__extension__
+struct rte_lpm_tbl_entry {
+	/**
+	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
+	 * a group index pointing to a tbl8 structure (tbl24 only, when
+	 * valid_group is set)
+	 */
+	uint32_t next_hop :24;
 	/* Using single uint8_t to store 3 values. */
-	uint8_t valid :1;     /**< Validation flag. */
-	uint8_t ext_entry :1; /**< External entry. */
-	uint8_t depth :6;     /**< Rule depth. */
+	uint32_t valid :1;    /**< Validation flag. */
+	/**
+	 * For tbl24:
+	 *  - valid_group == 0: entry stores a next hop
+	 *  - valid_group == 1: entry stores a group_index pointing to a tbl8
+	 * For tbl8:
+	 *  - valid_group indicates whether the current tbl8 is in use or not
+	 */
+	uint32_t valid_group :1;
+	uint32_t depth :6;    /**< Rule depth. */
 };
 
-/** @internal Tbl8 entry structure. */
-struct rte_lpm_tbl8_entry {
-	uint8_t next_hop; /**< next hop. */
-	/* Using single uint8_t to store 3 values. */
-	uint8_t valid :1;       /**< Validation flag. */
-	uint8_t valid_group :1; /**< Group validation flag. */
-	uint8_t depth :6;       /**< Rule depth. */
+#else
+
+__extension__
+struct rte_lpm_tbl_entry {
+	uint32_t depth :6;
+	uint32_t valid_group :1;
+	uint32_t valid :1;
+	uint32_t next_hop :24;
+
+};
+
+#endif
+
+/** LPM configuration structure. */
+struct rte_lpm_config {
+	uint32_t max_rules;    /**< Max number of rules. */
+	uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
+	int flags;             /**< This field is currently unused. */
 };
 
 /** @internal Rule structure. */
 struct rte_lpm_rule {
 	uint32_t ip; /**< Rule IP address. */
-	uint8_t next_hop; /**< Rule next hop. */
+	uint32_t next_hop; /**< Rule next hop. */
 };
 
 /** @internal Contains metadata about the rules table. */
@@ -130,21 +132,33 @@ struct rte_lpm_rule_info {
 
 /** @internal LPM structure. */
 struct rte_lpm {
-	TAILQ_ENTRY(rte_lpm) next; /**< Next in list. */
-
 	/* LPM metadata. */
 	char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
-	int mem_location; /**< @deprecated @see RTE_LPM_HEAP and RTE_LPM_MEMZONE. */
 	uint32_t max_rules; /**< Max. balanced rules per lpm. */
+	uint32_t number_tbl8s; /**< Number of tbl8s. */
 	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
 
 	/* LPM Tables. */
-	struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
 			__rte_cache_aligned; /**< LPM tbl24 table. */
-	struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
-			__rte_cache_aligned; /**< LPM tbl8 table. */
-	struct rte_lpm_rule rules_tbl[0] \
-			__rte_cache_aligned; /**< LPM rules. */
+	struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
+	struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
+};
+
+/** LPM RCU QSBR configuration structure. */
+struct rte_lpm_rcu_config {
+	struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
+	/* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
+	 * '0' for default: create defer queue for reclaim.
+	 */
+	enum rte_lpm_qsbr_mode mode;
+	uint32_t dq_size;       /* RCU defer queue size.
+	                         * default: lpm->number_tbl8s.
+	                         */
+	uint32_t reclaim_thd;   /* Threshold to trigger auto reclaim. */
+	uint32_t reclaim_max;   /* Max entries to reclaim in one go.
+	                         * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
+	                         */
 };
 
 /**
@@ -154,23 +168,21 @@ struct rte_lpm {
  *   LPM object name
  * @param socket_id
  *   NUMA socket ID for LPM table memory allocation
- * @param max_rules
- *   Maximum number of LPM rules that can be added
- * @param flags
- *   This parameter is currently unused
+ * @param config
+ *   Structure containing the configuration
  * @return
  *   Handle to LPM object on success, NULL otherwise with rte_errno set
  *   to an appropriate values. Possible rte_errno values include:
  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
  *    - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - E_RTE_NO_TAILQ - no tailq list could be got for the lpm object list
  *    - EINVAL - invalid parameter passed to function
  *    - ENOSPC - the maximum number of memzones has already been allocated
  *    - EEXIST - a memzone with the same name already exists
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  */
 struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+rte_lpm_create(const char *name, int socket_id,
+		const struct rte_lpm_config *config);
 
 /**
  * Find an existing LPM object and return a pointer to it.
@@ -196,6 +208,27 @@ rte_lpm_find_existing(const char *name);
 void
 rte_lpm_free(struct rte_lpm *lpm);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Associate RCU QSBR variable with an LPM object.
+ *
+ * @param lpm
+ *   the lpm object to add RCU QSBR
+ * @param cfg
+ *   RCU QSBR configuration
+ * @return
+ *   On success - 0
+ *   On error - 1 with error code set in rte_errno.
+ *   Possible rte_errno codes are:
+ *   - EINVAL - invalid pointer
+ *   - EEXIST - already added QSBR
+ *   - ENOMEM - memory allocation failure
+ */
+__rte_experimental
+int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
+
 /**
  * Add a rule to the LPM table.
  *
@@ -211,7 +244,26 @@ rte_lpm_free(struct rte_lpm *lpm);
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
+
+/**
+ * Check if a rule is present in the LPM table,
+ * and provide its next hop if it is.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @param ip
+ *   IP of the rule to be searched
+ * @param depth
+ *   Depth of the rule to be searched
+ * @param next_hop
+ *   Next hop of the rule (valid only if it is found)
+ * @return
+ *   1 if the rule exists, 0 if it does not, a negative value on failure
+ */
+int
+rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop);
 
 /**
  * Delete a rule from the LPM table.
@@ -250,28 +302,36 @@ rte_lpm_delete_all(struct rte_lpm *lpm);
  *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
  */
 static inline int
-rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
 {
 	unsigned tbl24_index = (ip >> 8);
-	uint16_t tbl_entry;
+	uint32_t tbl_entry;
+	const uint32_t *ptbl;
 
 	/* DEBUG: Check user input arguments. */
 	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
 
 	/* Copy tbl24 entry */
-	tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
+	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
+	tbl_entry = *ptbl;
 
+	/* Memory ordering is not required in lookup. Because dataflow
+	 * dependency exists, compiler or HW won't be able to re-order
+	 * the operations.
+	 */
 	/* Copy tbl8 entry (only if needed) */
 	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 
 		unsigned tbl8_index = (uint8_t)ip +
-			((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+			(((uint32_t)tbl_entry & 0x00FFFFFF) *
+			RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
 
-		tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+		tbl_entry = *ptbl;
 	}
 
-	*next_hop = (uint8_t)tbl_entry;
+	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
 	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
 }
 
@@ -299,11 +359,12 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
 	rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
 
 static inline int
-rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
-		uint16_t * next_hops, const unsigned n)
+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
+		uint32_t *next_hops, const unsigned n)
 {
 	unsigned i;
 	unsigned tbl24_indexes[n];
+	const uint32_t *ptbl;
 
 	/* DEBUG: Check user input arguments. */
 	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
@@ -315,22 +376,58 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
 	for (i = 0; i < n; i++) {
 		/* Simply copy tbl24 entry to output */
-		next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
+		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
+		next_hops[i] = *ptbl;
 
 		/* Overwrite output with tbl8 entry if needed */
 		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 
 			unsigned tbl8_index = (uint8_t)ips[i] +
-				((uint8_t)next_hops[i] *
+				(((uint32_t)next_hops[i] & 0x00FFFFFF) *
 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
 
-			next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+			next_hops[i] = *ptbl;
 		}
 	}
 	return 0;
 }
 
+/* Mask four results. */
+#define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)
+
+/**
+ * Lookup four IP addresses in an LPM table.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @param ip
+ *   Four IPs to be looked up in the LPM table
+ * @param hop
+ *   Next hop of the most specific rule found for IP (valid on lookup hit only).
+ *   This is a 4-element array of four-byte values.
+ *   If the lookup was successful for the given IP, then the least significant
+ *   three bytes of the corresponding element hold the actual next hop and the
+ *   most significant byte is zero.
+ *   If the lookup for the given IP failed, then the corresponding element
+ *   contains the default value; see the description of the next parameter.
+ * @param defv
+ *   Default value to populate into corresponding element of hop[] array,
+ *   if lookup would fail.
+ */
+static inline void
+rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
+	uint32_t defv);
+
+#if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
+#include "rte_lpm_neon.h"
+#elif defined(RTE_ARCH_PPC_64)
+#include "rte_lpm_altivec.h"
+#else
+#include "rte_lpm_sse.h"
+#endif
+
 #ifdef __cplusplus
 }
 #endif
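
Not part of the patch, added here only as reviewer context: a minimal usage sketch of the reworked API declared above (struct rte_lpm_config replacing the old max_rules/flags parameters of rte_lpm_create(), and the widened uint32_t next hops in rte_lpm_add()/rte_lpm_lookup()). The table name, table sizes, route and next-hop value are illustrative assumptions, EAL setup is reduced to a bare rte_eal_init() call, and SOCKET_ID_ANY is assumed to come in via the EAL headers.

#include <stdint.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_lpm.h>

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Configuration now travels in a structure; sizes are examples only. */
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0,
	};

	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
			&config);
	if (lpm == NULL)
		return -1;

	/* 192.168.0.0/24 -> next hop 100; next hops are 24-bit values now. */
	uint32_t net = (192u << 24) | (168u << 16);
	if (rte_lpm_add(lpm, net, 24, 100) < 0) {
		rte_lpm_free(lpm);
		return -1;
	}

	/* Longest-prefix-match lookup for 192.168.0.5; 0 means lookup hit. */
	uint32_t next_hop;
	if (rte_lpm_lookup(lpm, net | 5, &next_hop) == 0)
		printf("next hop: %u\n", next_hop);

	rte_lpm_free(lpm);
	return 0;
}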