/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * @file
 * RTE Longest Prefix Match (LPM)
 */
#include <errno.h>
#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>
#include <limits.h>

#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32

/** @deprecated Possible location to allocate memory. This was for last
 * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
 * allocated in memory using librte_malloc which uses a memzone. */
#define RTE_LPM_HEAP                    0

/** @deprecated Possible location to allocate memory. This was for last
 * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
 * allocated in memory using librte_malloc which uses a memzone. */
#define RTE_LPM_MEMZONE                 1

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS         256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/** @internal Macro to enable/disable run-time argument checks.
 * Expands to nothing unless the library is built with
 * RTE_LIBRTE_LPM_DEBUG, so release builds pay no cost. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/** @internal bitmask with valid and ext_entry/valid_group fields set */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300

/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS          0x0100
/** @internal Tbl24 entry structure.
 * Packed into exactly 2 bytes so the lookup fast path can read a whole
 * entry with a single 16-bit load (see rte_lpm_lookup / rte_lpm_lookupx4,
 * which compare it against RTE_LPM_VALID_EXT_ENTRY_BITMASK). */
struct rte_lpm_tbl24_entry {
	/* Stores Next hop or group index (i.e. gindex)into tbl8. */
	union {
		uint8_t next_hop;    /**< Next hop when the route ends here. */
		uint8_t tbl8_gindex; /**< tbl8 group index when ext_entry set. */
	};
	/* Using single uint8_t to store 3 values. */
	uint8_t valid     :1; /**< Validation flag. */
	uint8_t ext_entry :1; /**< External entry (route continues in tbl8). */
	uint8_t depth     :6; /**< Rule depth. */
};
/** @internal Tbl8 entry structure.
 * Same 2-byte layout as rte_lpm_tbl24_entry so both can be read through
 * a uint16_t pointer on the lookup fast path. */
struct rte_lpm_tbl8_entry {
	uint8_t next_hop; /**< next hop. */
	/* Using single uint8_t to store 3 values. */
	uint8_t valid       :1; /**< Validation flag. */
	uint8_t valid_group :1; /**< Group validation flag. */
	uint8_t depth       :6; /**< Rule depth. */
};
/** @internal Rule structure: one user-added route kept for
 * is_rule_present/delete bookkeeping (not used on the lookup fast path). */
struct rte_lpm_rule {
	uint32_t ip;       /**< Rule IP address. */
	uint8_t  next_hop; /**< Rule next hop. */
};
/** @internal Contains metadata about the rules table.
 * There is one of these per depth (see rte_lpm.rule_info); together they
 * describe how rules_tbl is partitioned by prefix depth. */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};
133 /** @internal LPM structure. */
136 char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
137 int mem_location; /**< @deprecated @see RTE_LPM_HEAP and RTE_LPM_MEMZONE. */
138 uint32_t max_rules; /**< Max. balanced rules per lpm. */
139 struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
142 struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
143 __rte_cache_aligned; /**< LPM tbl24 table. */
144 struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
145 __rte_cache_aligned; /**< LPM tbl8 table. */
146 struct rte_lpm_rule rules_tbl[0] \
147 __rte_cache_aligned; /**< LPM rules. */
/**
 * Create an LPM object.
 *
 * @param name
 *   LPM object name
 * @param socket_id
 *   NUMA socket ID for LPM table memory allocation
 * @param max_rules
 *   Maximum number of LPM rules that can be added
 * @param flags
 *   This parameter is currently unused
 * @return
 *   Handle to LPM object on success, NULL otherwise with rte_errno set
 *   to an appropriate values. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - invalid parameter passed to function
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
/**
 * Find an existing LPM object and return a pointer to it.
 *
 * @param name
 *   Name of the lpm object as passed to rte_lpm_create()
 * @return
 *   Pointer to lpm object or NULL if object not found with rte_errno
 *   set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name);
/**
 * Free an LPM object.
 *
 * @param lpm
 *   LPM object handle
 * @return
 *   None
 */
void
rte_lpm_free(struct rte_lpm *lpm);
/**
 * Add a rule to the LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be added to the LPM table
 * @param depth
 *   Depth of the rule to be added to the LPM table
 * @param next_hop
 *   Next hop of the rule to be added to the LPM table
 * @return
 *   0 on success, negative value otherwise
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
/**
 * Check if a rule is present in the LPM table,
 * and provide its next hop if it is.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be searched
 * @param depth
 *   Depth of the rule to searched
 * @param next_hop
 *   Next hop of the rule (valid only if it is found)
 * @return
 *   1 if the rule exists, 0 if it does not, a negative value on failure
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint8_t *next_hop);
/**
 * Delete a rule from the LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be deleted from the LPM table
 * @param depth
 *   Depth of the rule to be deleted from the LPM table
 * @return
 *   0 on success, negative value otherwise
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
/**
 * Delete all rules from the LPM table.
 *
 * @param lpm
 *   LPM object handle
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm);
259 * Lookup an IP into the LPM table.
264 * IP to be looked up in the LPM table
266 * Next hop of the most specific rule found for IP (valid on lookup hit only)
268 * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
271 rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
273 unsigned tbl24_index = (ip >> 8);
276 /* DEBUG: Check user input arguments. */
277 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
279 /* Copy tbl24 entry */
280 tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
282 /* Copy tbl8 entry (only if needed) */
283 if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
284 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
286 unsigned tbl8_index = (uint8_t)ip +
287 ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
289 tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
292 *next_hop = (uint8_t)tbl_entry;
293 return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
297 * Lookup multiple IP addresses in an LPM table. This may be implemented as a
298 * macro, so the address of the function should not be used.
303 * Array of IPs to be looked up in the LPM table
305 * Next hop of the most specific rule found for IP (valid on lookup hit only).
306 * This is an array of two byte values. The most significant byte in each
307 * value says whether the lookup was successful (bitmask
308 * RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
311 * Number of elements in ips (and next_hops) array to lookup. This should be a
312 * compile time constant, and divisible by 8 for best performance.
314 * -EINVAL for incorrect arguments, otherwise 0
316 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
317 rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
320 rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
321 uint16_t * next_hops, const unsigned n)
324 unsigned tbl24_indexes[n];
326 /* DEBUG: Check user input arguments. */
327 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
328 (next_hops == NULL)), -EINVAL);
330 for (i = 0; i < n; i++) {
331 tbl24_indexes[i] = ips[i] >> 8;
334 for (i = 0; i < n; i++) {
335 /* Simply copy tbl24 entry to output */
336 next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
338 /* Overwrite output with tbl8 entry if needed */
339 if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
340 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
342 unsigned tbl8_index = (uint8_t)ips[i] +
343 ((uint8_t)next_hops[i] *
344 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
346 next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
352 /* Mask four results. */
353 #define RTE_LPM_MASKX4_RES UINT64_C(0x00ff00ff00ff00ff)
356 * Lookup four IP addresses in an LPM table.
361 * Four IPs to be looked up in the LPM table
363 * Next hop of the most specific rule found for IP (valid on lookup hit only).
364 * This is an 4 elements array of two byte values.
365 * If the lookup was succesfull for the given IP, then least significant byte
366 * of the corresponding element is the actual next hop and the most
367 * significant byte is zero.
368 * If the lookup for the given IP failed, then corresponding element would
369 * contain default value, see description of then next parameter.
371 * Default value to populate into corresponding element of hop[] array,
372 * if lookup would fail.
375 rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
383 const __m128i mask8 =
384 _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
387 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
388 * as one 64-bit value (0x0300030003000300).
390 const uint64_t mask_xv =
391 ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
392 (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
393 (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
394 (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
397 * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
398 * as one 64-bit value (0x0100010001000100).
400 const uint64_t mask_v =
401 ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
402 (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
403 (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
404 (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
406 /* get 4 indexes for tbl24[]. */
407 i24 = _mm_srli_epi32(ip, CHAR_BIT);
409 /* extract values from tbl24[] */
410 idx = _mm_cvtsi128_si64(i24);
411 i24 = _mm_srli_si128(i24, sizeof(uint64_t));
413 tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
414 tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
416 idx = _mm_cvtsi128_si64(i24);
418 tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
419 tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
421 /* get 4 indexes for tbl8[]. */
422 i8.x = _mm_and_si128(ip, mask8);
424 pt = (uint64_t)tbl[0] |
425 (uint64_t)tbl[1] << 16 |
426 (uint64_t)tbl[2] << 32 |
427 (uint64_t)tbl[3] << 48;
429 /* search successfully finished for all 4 IP addresses. */
430 if (likely((pt & mask_xv) == mask_v)) {
431 uintptr_t ph = (uintptr_t)hop;
432 *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
436 if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
437 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
438 i8.u32[0] = i8.u32[0] +
439 (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
440 tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
442 if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
443 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
444 i8.u32[1] = i8.u32[1] +
445 (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
446 tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
448 if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
449 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
450 i8.u32[2] = i8.u32[2] +
451 (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
452 tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
454 if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
455 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
456 i8.u32[3] = i8.u32[3] +
457 (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
458 tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
461 hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
462 hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
463 hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
464 hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
471 #endif /* _RTE_LPM_H_ */