/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_LPM_H_
#define _RTE_LPM_H_

/**
 * @file
 * RTE Longest Prefix Match (LPM)
 */
#include <errno.h>
#include <sys/queue.h>
#include <stdint.h>

#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_common_vect.h>

#ifdef __cplusplus
extern "C" {
#endif
/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32

/** @deprecated Possible location to allocate memory. This was for last
 * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
 * allocated in memory using librte_malloc which uses a memzone. */
#define RTE_LPM_HEAP                    0

/** @deprecated Possible location to allocate memory. This was for last
 * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
 * allocated in memory using librte_malloc which uses a memzone. */
#define RTE_LPM_MEMZONE                 1

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS         256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
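
/*
 * How an IPv4 address maps onto the two tables: the upper 24 bits select a
 * tbl24 entry directly, and, when that entry is extended, the lowest 8 bits
 * select an entry inside the tbl8 group it points to. A minimal sketch of
 * that decomposition, mirroring the arithmetic used by the lookup functions
 * below (the address is an arbitrary example):
 *
 *      uint32_t ip = (10U << 24) | (1U << 16) | (2U << 8) | 3U;   (10.1.2.3)
 *      uint32_t tbl24_index = ip >> 8;      (0 .. RTE_LPM_TBL24_NUM_ENTRIES - 1)
 *      uint8_t  tbl8_offset = (uint8_t)ip;  (0 .. RTE_LPM_TBL8_GROUP_NUM_ENTRIES - 1)
 */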
/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
        if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
/** @internal bitmask with valid and ext_entry/valid_group fields set */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300

/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS          0x0100
/** @internal Tbl24 entry structure. */
struct rte_lpm_tbl24_entry {
        /* Stores next hop or group index (i.e. gindex) into tbl8. */
        union {
                uint8_t next_hop;
                uint8_t tbl8_gindex;
        };
        /* Using single uint8_t to store 3 values. */
        uint8_t valid     :1; /**< Validation flag. */
        uint8_t ext_entry :1; /**< External entry. */
        uint8_t depth     :6; /**< Rule depth. */
};
/** @internal Tbl8 entry structure. */
struct rte_lpm_tbl8_entry {
        uint8_t next_hop;       /**< Next hop. */
        /* Using single uint8_t to store 3 values. */
        uint8_t valid       :1; /**< Validation flag. */
        uint8_t valid_group :1; /**< Group validation flag. */
        uint8_t depth       :6; /**< Rule depth. */
};
/** @internal Rule structure. */
struct rte_lpm_rule {
        uint32_t ip;      /**< Rule IP address. */
        uint8_t next_hop; /**< Rule next hop. */
};
/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
        uint32_t used_rules; /**< Used rules so far. */
        uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};
/** @internal LPM structure. */
struct rte_lpm {
        TAILQ_ENTRY(rte_lpm) next;      /**< Next in list. */

        /* LPM metadata. */
        char name[RTE_LPM_NAMESIZE];    /**< Name of the lpm. */
        int mem_location; /**< @deprecated @see RTE_LPM_HEAP and RTE_LPM_MEMZONE. */
        uint32_t max_rules; /**< Max. balanced rules per lpm. */
        struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */

        /* LPM Tables. */
        struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
                        __rte_cache_aligned; /**< LPM tbl24 table. */
        struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
                        __rte_cache_aligned; /**< LPM tbl8 table. */
        struct rte_lpm_rule rules_tbl[0] \
                        __rte_cache_aligned; /**< LPM rules. */
};
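
/*
 * Note: rules_tbl is a zero-sized array at the end of the structure, so the
 * rule storage shares one allocation with the tables. A minimal sketch of how
 * the total allocation size can be derived from max_rules (illustrative only;
 * the actual sizing is done internally by rte_lpm_create()):
 *
 *      size_t mem_size = sizeof(struct rte_lpm) +
 *                      (size_t)max_rules * sizeof(struct rte_lpm_rule);
 */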
/**
 * Create an LPM object.
 *
 * @param name
 *   LPM object name
 * @param socket_id
 *   NUMA socket ID for LPM table memory allocation
 * @param max_rules
 *   Maximum number of LPM rules that can be added
 * @param flags
 *   This parameter is currently unused
 * @return
 *   Handle to LPM object on success, NULL otherwise with rte_errno set
 *   to an appropriate value. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - E_RTE_NO_TAILQ - no tailq list could be got for the lpm object list
 *    - EINVAL - invalid parameter passed to function
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
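
/*
 * Example: object lifecycle. An illustrative sketch only; the object name and
 * rule count are arbitrary and SOCKET_ID_ANY simply requests any NUMA socket.
 *
 *      struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *                      1024, 0);
 *      if (lpm == NULL)
 *              report the failure (rte_errno holds the reason)
 *
 *      add rules and perform lookups here
 *
 *      rte_lpm_free(lpm);
 */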
/**
 * Find an existing LPM object and return a pointer to it.
 *
 * @param name
 *   Name of the lpm object as passed to rte_lpm_create()
 * @return
 *   Pointer to lpm object or NULL if object not found with rte_errno
 *   set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name);
/**
 * Free an LPM object.
 *
 * @param lpm
 *   LPM object handle
 */
void
rte_lpm_free(struct rte_lpm *lpm);
/**
 * Add a rule to the LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be added to the LPM table
 * @param depth
 *   Depth of the rule to be added to the LPM table
 * @param next_hop
 *   Next hop of the rule to be added to the LPM table
 * @return
 *   0 on success, negative value otherwise
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
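
/*
 * Example: installing a rule. An illustrative sketch only; the prefix and
 * next hop id are arbitrary. The ip argument is expected in host byte order
 * (convert with rte_be_to_cpu_32() when it comes from a packet header) and
 * depth is the prefix length in bits.
 *
 *      uint32_t prefix = (192U << 24) | (168U << 16) | (1U << 8);   (192.168.1.0)
 *      uint8_t next_hop_id = 7;
 *
 *      if (rte_lpm_add(lpm, prefix, 24, next_hop_id) < 0)
 *              the rule could not be added (e.g. rule table full)
 */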
/**
 * Delete a rule from the LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be deleted from the LPM table
 * @param depth
 *   Depth of the rule to be deleted from the LPM table
 * @return
 *   0 on success, negative value otherwise
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
/**
 * Delete all rules from the LPM table.
 *
 * @param lpm
 *   LPM object handle
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm);
/**
 * Lookup an IP in the LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP to be looked up in the LPM table
 * @param next_hop
 *   Next hop of the most specific rule found for IP (valid on lookup hit only)
 * @return
 *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
 */
static inline int
rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
{
        unsigned tbl24_index = (ip >> 8);
        uint16_t tbl_entry;

        /* DEBUG: Check user input arguments. */
        RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

        /* Copy tbl24 entry */
        tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];

        /* Copy tbl8 entry (only if needed) */
        if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

                unsigned tbl8_index = (uint8_t)ip +
                                ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

                tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
        }

        *next_hop = (uint8_t)tbl_entry;
        return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
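
/*
 * Example: single lookup. An illustrative sketch only; the address is
 * arbitrary and in host byte order, like the ip passed to rte_lpm_add().
 *
 *      uint8_t next_hop;
 *      uint32_t dst = (192U << 24) | (168U << 16) | (1U << 8) | 42U;   (192.168.1.42)
 *
 *      if (rte_lpm_lookup(lpm, dst, &next_hop) == 0)
 *              forward using next_hop (the most specific matching rule wins)
 *      else
 *              no rule covers dst: drop or use a default route
 */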
/**
 * Lookup multiple IP addresses in an LPM table. This may be implemented as a
 * macro, so the address of the function should not be used.
 *
 * @param lpm
 *   LPM object handle
 * @param ips
 *   Array of IPs to be looked up in the LPM table
 * @param next_hops
 *   Next hop of the most specific rule found for IP (valid on lookup hit only).
 *   This is an array of two byte values. The most significant byte in each
 *   value says whether the lookup was successful (bitmask
 *   RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
 *   actual next hop.
 * @param n
 *   Number of elements in ips (and next_hops) array to lookup. This should be a
 *   compile time constant, and divisible by 8 for best performance.
 * @return
 *   -EINVAL for incorrect arguments, otherwise 0
 */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
                rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
                uint16_t *next_hops, const unsigned n)
{
        unsigned i;
        unsigned tbl24_indexes[n];

        /* DEBUG: Check user input arguments. */
        RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
                        (next_hops == NULL)), -EINVAL);

        for (i = 0; i < n; i++) {
                tbl24_indexes[i] = ips[i] >> 8;
        }

        for (i = 0; i < n; i++) {
                /* Simply copy tbl24 entry to output */
                next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];

                /* Overwrite output with tbl8 entry if needed */
                if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                                RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

                        unsigned tbl8_index = (uint8_t)ips[i] +
                                        ((uint8_t)next_hops[i] *
                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

                        next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
                }
        }
        return 0;
}
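
/*
 * Example: bulk lookup of a burst. An illustrative sketch only; BURST, the
 * address array and send_to_port()/drop_packet() are hypothetical. Each
 * 16-bit result carries RTE_LPM_LOOKUP_SUCCESS in its upper byte and the
 * next hop in its lower byte.
 *
 *      #define BURST 8
 *      uint32_t ips[BURST];    (filled with host byte order destinations)
 *      uint16_t hops[BURST];
 *      unsigned j;
 *
 *      rte_lpm_lookup_bulk(lpm, ips, hops, BURST);
 *
 *      for (j = 0; j < BURST; j++) {
 *              if (hops[j] & RTE_LPM_LOOKUP_SUCCESS)
 *                      send_to_port((uint8_t)hops[j]);
 *              else
 *                      drop_packet();
 *      }
 */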
/* Mask four results. */
#define RTE_LPM_MASKX4_RES      UINT64_C(0x00ff00ff00ff00ff)
/**
 * Lookup four IP addresses in an LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   Four IPs to be looked up in the LPM table
 * @param hop
 *   Next hop of the most specific rule found for IP (valid on lookup hit only).
 *   This is a 4-element array of two byte values.
 *   If the lookup was successful for the given IP, then the least significant
 *   byte of the corresponding element is the actual next hop and the most
 *   significant byte is zero.
 *   If the lookup for the given IP failed, then the corresponding element
 *   contains the default value, see description of the next parameter.
 * @param defv
 *   Default value to populate into corresponding element of hop[] array,
 *   if lookup would fail.
 */
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
        uint16_t defv)
{
        __m128i i24;
        rte_xmm_t i8;
        uint16_t tbl[4];
        uint64_t idx, pt;

        const __m128i mask8 =
                _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);

        /*
         * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
         * as one 64-bit value (0x0300030003000300).
         */
        const uint64_t mask_xv =
                ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
                (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
                (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
                (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);

        /*
         * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
         * as one 64-bit value (0x0100010001000100).
         */
        const uint64_t mask_v =
                ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
                (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
                (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
                (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);

        /* get 4 indexes for tbl24[]. */
        i24 = _mm_srli_epi32(ip, CHAR_BIT);

        /* extract values from tbl24[] */
        idx = _mm_cvtsi128_si64(i24);
        i24 = _mm_srli_si128(i24, sizeof(uint64_t));

        tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
        tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];

        idx = _mm_cvtsi128_si64(i24);

        tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
        tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];

        /* get 4 indexes for tbl8[]. */
        i8.m = _mm_and_si128(ip, mask8);

        pt = (uint64_t)tbl[0] |
                (uint64_t)tbl[1] << 16 |
                (uint64_t)tbl[2] << 32 |
                (uint64_t)tbl[3] << 48;

        /* search successfully finished for all 4 IP addresses. */
        if (likely((pt & mask_xv) == mask_v)) {
                uintptr_t ph = (uintptr_t)hop;
                *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
                return;
        }

        if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[0] = i8.u32[0] +
                        (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
        }
        if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[1] = i8.u32[1] +
                        (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
        }
        if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[2] = i8.u32[2] +
                        (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
        }
        if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[3] = i8.u32[3] +
                        (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
        }

        hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
        hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
        hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
        hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
}
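
/*
 * Example: looking up four addresses at once. An illustrative sketch only;
 * dst[] is assumed to hold four host byte order destinations. The addresses
 * are packed into one __m128i, and 255 is used as the default next hop
 * returned for any of the four lookups that miss.
 *
 *      uint32_t dst[4];
 *      uint16_t hop[4];
 *      __m128i ipx;
 *
 *      ipx = _mm_loadu_si128((const __m128i *)dst);
 *      rte_lpm_lookupx4(lpm, ipx, hop, 255);
 *
 *      hop[0] .. hop[3] now hold the next hop ids, or 255 on a miss.
 */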
#ifdef __cplusplus
}
#endif

#endif /* _RTE_LPM_H_ */