1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 * Copyright(c) 2020 Arm Limited
11 * RTE Longest Prefix Match (LPM)
15 #include <sys/queue.h>
18 #include <rte_branch_prediction.h>
19 #include <rte_byteorder.h>
20 #include <rte_config.h>
21 #include <rte_memory.h>
22 #include <rte_common.h>
24 #include <rte_rcu_qsbr.h>
30 /** Max number of characters in LPM name. */
31 #define RTE_LPM_NAMESIZE 32
33 /** Maximum depth value possible for IPv4 LPM. */
34 #define RTE_LPM_MAX_DEPTH 32
36 /** @internal Total number of tbl24 entries. */
37 #define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
39 /** @internal Number of entries in a tbl8 group. */
40 #define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
42 /** @internal Max number of tbl8 groups in the tbl8. */
43 #define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)
45 /** @internal Total number of tbl8 groups in the tbl8. */
46 #define RTE_LPM_TBL8_NUM_GROUPS 256
48 /** @internal Total number of tbl8 entries. */
49 #define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
50 RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
52 /** @internal Macro to enable/disable run-time checks. */
53 #if defined(RTE_LIBRTE_LPM_DEBUG)
54 #define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
55 if (cond) return (retval); \
58 #define RTE_LPM_RETURN_IF_TRUE(cond, retval)
61 /** @internal bitmask with valid and valid_group fields set */
62 #define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
64 /** Bitmask used to indicate successful lookup */
65 #define RTE_LPM_LOOKUP_SUCCESS 0x01000000
67 /** @internal Default RCU defer queue entries to reclaim in one go. */
68 #define RTE_LPM_RCU_DQ_RECLAIM_MAX 16
70 /** RCU reclamation modes */
/** RCU reclamation modes, passed via rte_lpm_rcu_config.mode. */
enum rte_lpm_qsbr_mode {
	/** Create defer queue for reclaim (default, value 0). */
	RTE_LPM_QSBR_MODE_DQ = 0,
	/** Use blocking mode reclaim. No defer queue created. */
	RTE_LPM_QSBR_MODE_SYNC
78 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
79 /** @internal Tbl24 entry structure. */
struct rte_lpm_tbl_entry {
	/**
	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
	 * a group index pointing to a tbl8 structure (tbl24 only, when
	 * valid_group is set).
	 */
	uint32_t next_hop :24;
	/* Using single uint8_t to store 3 values. */
	uint32_t valid :1; /**< Validation flag. */
	/**
	 * For tbl24:
	 * - valid_group == 0: entry stores a next hop
	 * - valid_group == 1: entry stores a group_index pointing to a tbl8
	 * For tbl8:
	 * - valid_group indicates whether the current tbl8 is in use or not
	 */
	uint32_t valid_group :1;
	uint32_t depth :6; /**< Rule depth. */
/* Big-endian layout: bit-fields appear in reverse order relative to the
 * little-endian variant above so the in-memory word layout matches.
 */
struct rte_lpm_tbl_entry {
	uint32_t valid_group :1; /**< See little-endian variant for semantics. */
	uint32_t next_hop :24; /**< Next hop, or tbl8 group index when valid_group is set. */
114 /** LPM configuration structure. */
/** LPM configuration structure, consumed by rte_lpm_create(). */
struct rte_lpm_config {
	uint32_t max_rules; /**< Max number of rules. */
	uint32_t number_tbl8s; /**< Number of tbl8s to allocate (at most RTE_LPM_MAX_TBL8_NUM_GROUPS). */
	int flags; /**< This field is currently unused. */
/** @internal LPM structure. */
	/* tbl24 is indexed by the 24 most significant bits of the IPv4
	 * address (see rte_lpm_lookup: index = ip >> 8).
	 */
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
129 /** LPM RCU QSBR configuration structure. */
/** LPM RCU QSBR configuration structure. */
struct rte_lpm_rcu_config {
	struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
	/* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
	 * '0' for default: create defer queue for reclaim.
	 */
	enum rte_lpm_qsbr_mode mode;
	uint32_t dq_size; /* RCU defer queue size.
			   * default: lpm->number_tbl8s.
			   */
	uint32_t reclaim_thd; /* Threshold to trigger auto reclaim. */
	uint32_t reclaim_max; /* Max entries to reclaim in one go.
			       * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
			       */
146 * Create an LPM object.
151 * NUMA socket ID for LPM table memory allocation
153 * Structure containing the configuration
155 * Handle to LPM object on success, NULL otherwise with rte_errno set
 * to an appropriate value. Possible rte_errno values include:
157 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
158 * - E_RTE_SECONDARY - function was called from a secondary process instance
159 * - EINVAL - invalid parameter passed to function
160 * - ENOSPC - the maximum number of memzones has already been allocated
161 * - EEXIST - a memzone with the same name already exists
162 * - ENOMEM - no appropriate memory area found in which to create memzone
165 rte_lpm_create(const char *name, int socket_id,
166 const struct rte_lpm_config *config);
169 * Find an existing LPM object and return a pointer to it.
172 * Name of the lpm object as passed to rte_lpm_create()
174 * Pointer to lpm object or NULL if object not found with rte_errno
175 * set appropriately. Possible rte_errno values include:
176 * - ENOENT - required entry not available to return.
179 rte_lpm_find_existing(const char *name);
182 * Free an LPM object.
190 rte_lpm_free(struct rte_lpm *lpm);
194 * @b EXPERIMENTAL: this API may change without prior notice
196 * Associate RCU QSBR variable with an LPM object.
199 * the lpm object to add RCU QSBR
201 * RCU QSBR configuration
204 * On error - 1 with error code set in rte_errno.
205 * Possible rte_errno codes are:
206 * - EINVAL - invalid pointer
207 * - EEXIST - already added QSBR
208 * - ENOMEM - memory allocation failure
211 int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
214 * Add a rule to the LPM table.
219 * IP of the rule to be added to the LPM table
221 * Depth of the rule to be added to the LPM table
223 * Next hop of the rule to be added to the LPM table
225 * 0 on success, negative value otherwise
228 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
231 * Check if a rule is present in the LPM table,
232 * and provide its next hop if it is.
237 * IP of the rule to be searched
 * Depth of the rule to be searched
241 * Next hop of the rule (valid only if it is found)
243 * 1 if the rule exists, 0 if it does not, a negative value on failure
246 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
250 * Delete a rule from the LPM table.
255 * IP of the rule to be deleted from the LPM table
257 * Depth of the rule to be deleted from the LPM table
259 * 0 on success, negative value otherwise
262 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
265 * Delete all rules from the LPM table.
271 rte_lpm_delete_all(struct rte_lpm *lpm);
274 * Lookup an IP into the LPM table.
279 * IP to be looked up in the LPM table
281 * Next hop of the most specific rule found for IP (valid on lookup hit only)
283 * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
286 rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
288 unsigned tbl24_index = (ip >> 8);
290 const uint32_t *ptbl;
292 /* DEBUG: Check user input arguments. */
293 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
295 /* Copy tbl24 entry */
296 ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
299 /* Memory ordering is not required in lookup. Because dataflow
300 * dependency exists, compiler or HW won't be able to re-order
303 /* Copy tbl8 entry (only if needed) */
304 if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
305 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
307 unsigned tbl8_index = (uint8_t)ip +
308 (((uint32_t)tbl_entry & 0x00FFFFFF) *
309 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
311 ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
315 *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
316 return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
320 * Lookup multiple IP addresses in an LPM table. This may be implemented as a
321 * macro, so the address of the function should not be used.
326 * Array of IPs to be looked up in the LPM table
328 * Next hop of the most specific rule found for IP (valid on lookup hit only).
329 * This is an array of two byte values. The most significant byte in each
330 * value says whether the lookup was successful (bitmask
331 * RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
334 * Number of elements in ips (and next_hops) array to lookup. This should be a
335 * compile time constant, and divisible by 8 for best performance.
337 * -EINVAL for incorrect arguments, otherwise 0
339 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
340 rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
343 rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
344 uint32_t *next_hops, const unsigned n)
347 unsigned tbl24_indexes[n];
348 const uint32_t *ptbl;
350 /* DEBUG: Check user input arguments. */
351 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
352 (next_hops == NULL)), -EINVAL);
354 for (i = 0; i < n; i++) {
355 tbl24_indexes[i] = ips[i] >> 8;
358 for (i = 0; i < n; i++) {
359 /* Simply copy tbl24 entry to output */
360 ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
361 next_hops[i] = *ptbl;
363 /* Overwrite output with tbl8 entry if needed */
364 if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
365 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
367 unsigned tbl8_index = (uint8_t)ips[i] +
368 (((uint32_t)next_hops[i] & 0x00FFFFFF) *
369 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
371 ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
372 next_hops[i] = *ptbl;
378 /* Mask four results. */
379 #define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)
382 * Lookup four IP addresses in an LPM table.
387 * Four IPs to be looked up in the LPM table
389 * Next hop of the most specific rule found for IP (valid on lookup hit only).
 * This is a 4-element array of two byte values.
391 * If the lookup was successful for the given IP, then least significant byte
392 * of the corresponding element is the actual next hop and the most
393 * significant byte is zero.
394 * If the lookup for the given IP failed, then corresponding element would
395 * contain default value, see description of then next parameter.
397 * Default value to populate into corresponding element of hop[] array,
398 * if lookup would fail.
401 rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
404 #if defined(RTE_ARCH_ARM)
405 #ifdef RTE_HAS_SVE_ACLE
406 #include "rte_lpm_sve.h"
408 #include "rte_lpm_neon.h"
410 #elif defined(RTE_ARCH_PPC_64)
411 #include "rte_lpm_altivec.h"
413 #include "rte_lpm_sse.h"
420 #endif /* _RTE_LPM_H_ */