1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 * Copyright(c) 2020 Arm Limited
11 * RTE Longest Prefix Match (LPM)
16 #include <rte_branch_prediction.h>
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
20 #include <rte_rcu_qsbr.h>
/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS     (1 << 24)

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS         256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
/* Checks compile out entirely in non-debug builds. */
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
/** @internal bitmask with valid and valid_group fields set */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000

/** @internal Default RCU defer queue entries to reclaim in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX      16
/** RCU reclamation modes */
enum rte_lpm_qsbr_mode {
	/** Create defer queue for reclaim. */
	RTE_LPM_QSBR_MODE_DQ = 0,
	/** Use blocking mode reclaim. No defer queue created. */
	RTE_LPM_QSBR_MODE_SYNC
};
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
__extension__
struct rte_lpm_tbl_entry {
	/**
	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
	 * a group index pointing to a tbl8 structure (tbl24 only, when
	 * valid_group is set)
	 */
	uint32_t next_hop    :24;
	/* Using single uint8_t to store 3 values. */
	uint32_t valid       :1;   /**< Validation flag. */
	/**
	 * For tbl24:
	 *  - valid_group == 0: entry stores a next hop
	 *  - valid_group == 1: entry stores a group_index pointing to a tbl8
	 * For tbl8:
	 *  - valid_group indicates whether the current tbl8 is in use or not
	 */
	uint32_t valid_group :1;
	uint32_t depth       :6; /**< Rule depth. */
};
#else
/* Big-endian layout: same fields, reversed bit-field order so the
 * on-wire/in-memory 32-bit image matches the little-endian build.
 */
__extension__
struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};
#endif
/** LPM configuration structure. */
struct rte_lpm_config {
	uint32_t max_rules;      /**< Max number of rules. */
	uint32_t number_tbl8s;   /**< Number of tbl8s to allocate. */
	int flags;               /**< This field is currently unused. */
};
117 /** @internal LPM structure. */
120 struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
121 __rte_cache_aligned; /**< LPM tbl24 table. */
122 struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
125 /** LPM RCU QSBR configuration structure. */
126 struct rte_lpm_rcu_config {
127 struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
128 /* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
129 * '0' for default: create defer queue for reclaim.
131 enum rte_lpm_qsbr_mode mode;
132 uint32_t dq_size; /* RCU defer queue size.
133 * default: lpm->number_tbl8s.
135 uint32_t reclaim_thd; /* Threshold to trigger auto reclaim. */
136 uint32_t reclaim_max; /* Max entries to reclaim in one go.
137 * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
142 * Create an LPM object.
147 * NUMA socket ID for LPM table memory allocation
149 * Structure containing the configuration
151 * Handle to LPM object on success, NULL otherwise with rte_errno set
152 * to an appropriate values. Possible rte_errno values include:
153 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
154 * - E_RTE_SECONDARY - function was called from a secondary process instance
155 * - EINVAL - invalid parameter passed to function
156 * - ENOSPC - the maximum number of memzones has already been allocated
157 * - EEXIST - a memzone with the same name already exists
158 * - ENOMEM - no appropriate memory area found in which to create memzone
161 rte_lpm_create(const char *name, int socket_id,
162 const struct rte_lpm_config *config);
165 * Find an existing LPM object and return a pointer to it.
168 * Name of the lpm object as passed to rte_lpm_create()
170 * Pointer to lpm object or NULL if object not found with rte_errno
171 * set appropriately. Possible rte_errno values include:
172 * - ENOENT - required entry not available to return.
175 rte_lpm_find_existing(const char *name);
178 * Free an LPM object.
186 rte_lpm_free(struct rte_lpm *lpm);
190 * @b EXPERIMENTAL: this API may change without prior notice
192 * Associate RCU QSBR variable with an LPM object.
195 * the lpm object to add RCU QSBR
197 * RCU QSBR configuration
200 * On error - 1 with error code set in rte_errno.
201 * Possible rte_errno codes are:
202 * - EINVAL - invalid pointer
203 * - EEXIST - already added QSBR
204 * - ENOMEM - memory allocation failure
207 int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
210 * Add a rule to the LPM table.
215 * IP of the rule to be added to the LPM table
217 * Depth of the rule to be added to the LPM table
219 * Next hop of the rule to be added to the LPM table
221 * 0 on success, negative value otherwise
224 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
227 * Check if a rule is present in the LPM table,
228 * and provide its next hop if it is.
233 * IP of the rule to be searched
235 * Depth of the rule to searched
237 * Next hop of the rule (valid only if it is found)
239 * 1 if the rule exists, 0 if it does not, a negative value on failure
242 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
246 * Delete a rule from the LPM table.
251 * IP of the rule to be deleted from the LPM table
253 * Depth of the rule to be deleted from the LPM table
255 * 0 on success, negative value otherwise
258 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
261 * Delete all rules from the LPM table.
267 rte_lpm_delete_all(struct rte_lpm *lpm);
270 * Lookup an IP into the LPM table.
275 * IP to be looked up in the LPM table
277 * Next hop of the most specific rule found for IP (valid on lookup hit only)
279 * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
282 rte_lpm_lookup(const struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
284 unsigned tbl24_index = (ip >> 8);
286 const uint32_t *ptbl;
288 /* DEBUG: Check user input arguments. */
289 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
291 /* Copy tbl24 entry */
292 ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
295 /* Memory ordering is not required in lookup. Because dataflow
296 * dependency exists, compiler or HW won't be able to re-order
299 /* Copy tbl8 entry (only if needed) */
300 if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
301 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
303 unsigned tbl8_index = (uint8_t)ip +
304 (((uint32_t)tbl_entry & 0x00FFFFFF) *
305 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
307 ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
311 *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
312 return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
316 * Lookup multiple IP addresses in an LPM table. This may be implemented as a
317 * macro, so the address of the function should not be used.
322 * Array of IPs to be looked up in the LPM table
324 * Next hop of the most specific rule found for IP (valid on lookup hit only).
325 * This is an array of two byte values. The most significant byte in each
326 * value says whether the lookup was successful (bitmask
327 * RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
330 * Number of elements in ips (and next_hops) array to lookup. This should be a
331 * compile time constant, and divisible by 8 for best performance.
333 * -EINVAL for incorrect arguments, otherwise 0
335 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
336 rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
339 rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
340 uint32_t *next_hops, const unsigned n)
343 unsigned tbl24_indexes[n];
344 const uint32_t *ptbl;
346 /* DEBUG: Check user input arguments. */
347 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
348 (next_hops == NULL)), -EINVAL);
350 for (i = 0; i < n; i++) {
351 tbl24_indexes[i] = ips[i] >> 8;
354 for (i = 0; i < n; i++) {
355 /* Simply copy tbl24 entry to output */
356 ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
357 next_hops[i] = *ptbl;
359 /* Overwrite output with tbl8 entry if needed */
360 if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
361 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
363 unsigned tbl8_index = (uint8_t)ips[i] +
364 (((uint32_t)next_hops[i] & 0x00FFFFFF) *
365 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
367 ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
368 next_hops[i] = *ptbl;
/* Mask four results. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)
378 * Lookup four IP addresses in an LPM table.
383 * Four IPs to be looked up in the LPM table
385 * Next hop of the most specific rule found for IP (valid on lookup hit only).
386 * This is an 4 elements array of two byte values.
387 * If the lookup was successful for the given IP, then least significant byte
388 * of the corresponding element is the actual next hop and the most
389 * significant byte is zero.
390 * If the lookup for the given IP failed, then corresponding element would
391 * contain default value, see description of then next parameter.
393 * Default value to populate into corresponding element of hop[] array,
394 * if lookup would fail.
397 rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
400 #if defined(RTE_ARCH_ARM)
401 #ifdef RTE_HAS_SVE_ACLE
402 #include "rte_lpm_sve.h"
404 #include "rte_lpm_neon.h"
406 #elif defined(RTE_ARCH_PPC_64)
407 #include "rte_lpm_altivec.h"
408 #elif defined(RTE_ARCH_X86)
409 #include "rte_lpm_sse.h"
411 #include "rte_lpm_scalar.h"
418 #endif /* _RTE_LPM_H_ */