/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_prefetch.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>

#include <rte_rib6.h>
#include <rte_fib6.h>

#include "trie.h"

/** @internal Total number of tbl24 entries. */
#define TRIE_TBL24_NUM_ENT	(1 << 24)

/** Maximum depth value possible for IPv6 LPM. */
#define TRIE_MAX_DEPTH		128

/** @internal Number of entries in a tbl8 group. */
#define TRIE_TBL8_GRP_NUM_ENT	256ULL

/** @internal Total number of tbl8 groups in the tbl8. */
#define TRIE_TBL8_NUM_GROUPS	65536

/** @internal Bit marking an entry as extended (pointing to a tbl8 group). */
#define TRIE_EXT_ENT		1

#define TRIE_NAMESIZE		64

#define BITMAP_SLAB_BIT_SIZE_LOG2	6
#define BITMAP_SLAB_BIT_SIZE		(1ULL << BITMAP_SLAB_BIT_SIZE_LOG2)
#define BITMAP_SLAB_BITMASK		(BITMAP_SLAB_BIT_SIZE - 1)
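
/*
 * These macros split a flat bit index into a 64-bit slab and an offset
 * within it: for example, bit 200 lives in slab 200 >> 6 == 3 at offset
 * 200 & 63 == 8, since 3 * 64 + 8 == 200.
 */
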
struct rte_trie_tbl {
	uint32_t	number_tbl8s;	/**< Total number of tbl8s */
	uint32_t	rsvd_tbl8s;	/**< Number of reserved tbl8s */
	uint32_t	cur_tbl8s;	/**< Current number of tbl8s */
	uint64_t	def_nh;		/**< Default next hop */
	enum rte_fib_trie_nh_sz	nh_sz;	/**< Size of nexthop entry */
	uint64_t	*tbl8;		/**< tbl8 table */
	uint32_t	*tbl8_pool;	/**< pool (stack) of free tbl8 indexes */
	uint32_t	tbl8_pool_pos;	/**< next free position in the pool */
	__extension__ uint64_t	tbl24[0] __rte_cache_aligned; /**< tbl24 table */
};
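
/*
 * The flexible tbl24 array holds TRIE_TBL24_NUM_ENT entries of
 * (1 << nh_sz) bytes each, so its footprint is 32 MB for
 * RTE_FIB6_TRIE_2B, 64 MB for RTE_FIB6_TRIE_4B and 128 MB for
 * RTE_FIB6_TRIE_8B.
 */
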
enum lookup_type {
	MACRO,
	INLINE,
	UNI
};
static enum lookup_type test_lookup = MACRO;

static inline uint32_t
get_tbl24_idx(const uint8_t *ip)
{
	return ip[0] << 16 | ip[1] << 8 | ip[2];
}
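
/*
 * The first three address bytes form the tbl24 index: for 2001:db8::1
 * the bytes 0x20, 0x01, 0x0d yield index 0x20010d.
 */
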
static inline void *
get_tbl24_p(struct rte_trie_tbl *dp, const uint8_t *ip, uint8_t nh_sz)
{
	uint32_t tbl24_idx;

	tbl24_idx = get_tbl24_idx(ip);
	return (void *)&((uint8_t *)dp->tbl24)[tbl24_idx << nh_sz];
}

static inline uint8_t
bits_in_nh(uint8_t nh_sz)
{
	return 8 * (1 << nh_sz);
}

static inline uint64_t
get_max_nh(uint8_t nh_sz)
{
	return ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1);
}

static inline uint64_t
lookup_msk(uint8_t nh_sz)
{
	return ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1;
}
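
/*
 * For reference, the three helpers above evaluate to:
 *
 *	nh_sz			bits_in_nh	get_max_nh	lookup_msk
 *	RTE_FIB6_TRIE_2B	16		0x7fff		0xffff
 *	RTE_FIB6_TRIE_4B	32		0x7fffffff	0xffffffff
 *	RTE_FIB6_TRIE_8B	64		0x7fff..ffff	0xffff..ffff
 *
 * get_max_nh() is one bit narrower than the entry because bit 0 of
 * every entry is reserved for TRIE_EXT_ENT; lookup_msk() shifts in two
 * steps to avoid the undefined behaviour of 1ULL << 64.
 */
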
static inline uint8_t
get_psd_idx(uint32_t val, uint8_t nh_sz)
{
	return val & ((1 << (3 - nh_sz)) - 1);
}

static inline uint32_t
get_tbl_pos(uint32_t val, uint8_t nh_sz)
{
	return val >> (3 - nh_sz);
}
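
/*
 * Entries narrower than 8 bytes are packed into the uint64_t words of
 * a table: with nh_sz == RTE_FIB6_TRIE_2B four 16-bit entries share a
 * word, so entry 10 sits in word get_tbl_pos(10, 1) == 2 at
 * sub-position get_psd_idx(10, 1) == 2, i.e. bits 32..47.
 */
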
static inline uint64_t
get_tbl_val_by_idx(uint64_t *tbl, uint32_t idx, uint8_t nh_sz)
{
	return ((tbl[get_tbl_pos(idx, nh_sz)] >> (get_psd_idx(idx, nh_sz) *
		bits_in_nh(nh_sz))) & lookup_msk(nh_sz));
}

static inline void *
get_tbl_p_by_idx(uint64_t *tbl, uint64_t idx, uint8_t nh_sz)
{
	return (uint8_t *)tbl + (idx << nh_sz);
}

static inline int
is_entry_extended(uint64_t ent)
{
	return (ent & TRIE_EXT_ENT) == TRIE_EXT_ENT;
}
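
/*
 * Entry encoding: a leaf entry holds (next_hop << 1); an extended
 * entry holds (tbl8_group_index << 1) | TRIE_EXT_ENT. Bit 0 therefore
 * selects the interpretation, and >> 1 recovers the payload either way.
 */
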
#define LOOKUP_FUNC(suffix, type, nh_sz)				\
static void rte_trie_lookup_bulk_##suffix(void *p,			\
	uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE],				\
	uint64_t *next_hops, const unsigned int n)			\
{									\
	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;		\
	uint64_t tmp;							\
	uint32_t i, j;							\
									\
	for (i = 0; i < n; i++) {					\
		tmp = ((type *)dp->tbl24)[get_tbl24_idx(&ips[i][0])];	\
		j = 3;							\
		while (is_entry_extended(tmp)) {			\
			tmp = ((type *)dp->tbl8)[ips[i][j++] +		\
				((tmp >> 1) * TRIE_TBL8_GRP_NUM_ENT)];	\
		}							\
		next_hops[i] = tmp >> 1;				\
	}								\
}

LOOKUP_FUNC(2b, uint16_t, 1)
LOOKUP_FUNC(4b, uint32_t, 2)
LOOKUP_FUNC(8b, uint64_t, 3)
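
/*
 * Usage sketch (illustrative only, not part of this file): applications
 * do not call the generated bulk functions directly but go through the
 * public rte_fib6 API, which resolves one of them via
 * rte_trie_get_lookup_fn() below. Roughly:
 *
 *	struct rte_fib6_conf conf = {
 *		.type = RTE_FIB6_TRIE,
 *		.default_nh = 0,
 *		.max_routes = 1 << 16,
 *		.trie = { .nh_sz = RTE_FIB6_TRIE_4B, .num_tbl8 = 1 << 15 },
 *	};
 *	struct rte_fib6 *fib = rte_fib6_create("example", 0, &conf);
 *	rte_fib6_add(fib, ip, 48, 42);
 *	rte_fib6_lookup_bulk(fib, ips, next_hops, n);
 */
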
rte_fib6_lookup_fn_t
rte_trie_get_lookup_fn(struct rte_fib6_conf *conf)
{
	enum rte_fib_trie_nh_sz nh_sz = conf->trie.nh_sz;

	if (test_lookup == MACRO) {
		switch (nh_sz) {
		case RTE_FIB6_TRIE_2B:
			return rte_trie_lookup_bulk_2b;
		case RTE_FIB6_TRIE_4B:
			return rte_trie_lookup_bulk_4b;
		case RTE_FIB6_TRIE_8B:
			return rte_trie_lookup_bulk_8b;
		}
	}

	return NULL;
}

static void
write_to_dp(void *ptr, uint64_t val, enum rte_fib_trie_nh_sz size, int n)
{
	int i;
	uint16_t *ptr16 = (uint16_t *)ptr;
	uint32_t *ptr32 = (uint32_t *)ptr;
	uint64_t *ptr64 = (uint64_t *)ptr;

	switch (size) {
	case RTE_FIB6_TRIE_2B:
		for (i = 0; i < n; i++)
			ptr16[i] = (uint16_t)val;
		break;
	case RTE_FIB6_TRIE_4B:
		for (i = 0; i < n; i++)
			ptr32[i] = (uint32_t)val;
		break;
	case RTE_FIB6_TRIE_8B:
		for (i = 0; i < n; i++)
			ptr64[i] = (uint64_t)val;
		break;
	}
}

static void
tbl8_pool_init(struct rte_trie_tbl *dp)
{
	uint32_t i;

	/* put entire range of indexes to the tbl8 pool */
	for (i = 0; i < dp->number_tbl8s; i++)
		dp->tbl8_pool[i] = i;

	dp->tbl8_pool_pos = 0;
}

/*
 * Get an index of a free tbl8 from the pool.
 */
static inline int32_t
tbl8_get(struct rte_trie_tbl *dp)
{
	if (dp->tbl8_pool_pos == dp->number_tbl8s)
		/* no more free tbl8 */
		return -ENOSPC;

	return dp->tbl8_pool[dp->tbl8_pool_pos++];
}

/*
 * Put an index of a free tbl8 back to the pool.
 */
static inline void
tbl8_put(struct rte_trie_tbl *dp, uint32_t tbl8_ind)
{
	dp->tbl8_pool[--dp->tbl8_pool_pos] = tbl8_ind;
}
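
/*
 * The pool works as a LIFO stack: tbl8_get() pops the index at
 * tbl8_pool_pos and tbl8_put() pushes a freed index back into the same
 * slot, so the most recently freed group is reused first.
 */
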
static int
tbl8_alloc(struct rte_trie_tbl *dp, uint64_t nh)
{
	int32_t tbl8_idx;
	uint8_t *tbl8_ptr;

	tbl8_idx = tbl8_get(dp);
	if (tbl8_idx < 0)
		return tbl8_idx;
	tbl8_ptr = (uint8_t *)dp->tbl8 +
		((tbl8_idx * TRIE_TBL8_GRP_NUM_ENT) << dp->nh_sz);
	/* Init tbl8 entries with nexthop from tbl24 */
	write_to_dp((void *)tbl8_ptr, nh, dp->nh_sz, TRIE_TBL8_GRP_NUM_ENT);
	return tbl8_idx;
}
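
/*
 * tbl8_recycle() below undoes an extension once it becomes redundant:
 * if every entry of a tbl8 group carries the same non-extended nexthop,
 * that value is written back into the parent entry, the group is zeroed
 * and its index is returned to the pool.
 */
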
static void
tbl8_recycle(struct rte_trie_tbl *dp, void *par, uint64_t tbl8_idx)
{
	uint32_t i;
	uint64_t nh;
	uint16_t *ptr16;
	uint32_t *ptr32;
	uint64_t *ptr64;

	switch (dp->nh_sz) {
	case RTE_FIB6_TRIE_2B:
		ptr16 = &((uint16_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr16;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr16[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr16[i] = 0;
		break;
	case RTE_FIB6_TRIE_4B:
		ptr32 = &((uint32_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr32;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr32[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr32[i] = 0;
		break;
	case RTE_FIB6_TRIE_8B:
		ptr64 = &((uint64_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr64;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr64[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr64[i] = 0;
		break;
	}
	tbl8_put(dp, tbl8_idx);
}

static inline uint32_t
get_idx(const uint8_t *ip, uint32_t prev_idx, int bytes, int first_byte)
{
	int i;
	uint32_t idx = 0;
	int8_t bitshift;

	for (i = first_byte; i < (first_byte + bytes); i++) {
		bitshift = (int8_t)(((first_byte + bytes - 1) - i)*BYTE_SIZE);
		idx |= ip[i] << bitshift;
	}
	return (prev_idx * 256) + idx;
}
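
/*
 * E.g. get_idx(ip, 0, 3, 0) == ip[0] << 16 | ip[1] << 8 | ip[2], the
 * same value get_tbl24_idx() returns, while get_idx(ip, prev, 1, 4) ==
 * prev * 256 + ip[4], i.e. the entry index inside the tbl8 group
 * reached from entry prev for the next address byte.
 */
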
static inline uint64_t
get_val_by_p(void *p, uint8_t nh_sz)
{
	uint64_t val = 0;

	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		val = *(uint16_t *)p;
		break;
	case RTE_FIB6_TRIE_4B:
		val = *(uint32_t *)p;
		break;
	case RTE_FIB6_TRIE_8B:
		val = *(uint64_t *)p;
		break;
	}
	return val;
}

/*
 * Recursively try to recycle the tbl8 groups along the root path.
 */
static void
recycle_root_path(struct rte_trie_tbl *dp, const uint8_t *ip_part,
	uint8_t common_tbl8, void *prev)
{
	void *p;
	uint64_t val;

	val = get_val_by_p(prev, dp->nh_sz);
	if (unlikely((val & TRIE_EXT_ENT) != TRIE_EXT_ENT))
		return;

	if (common_tbl8 != 0) {
		p = get_tbl_p_by_idx(dp->tbl8, (val >> 1) * 256 + *ip_part,
			dp->nh_sz);
		recycle_root_path(dp, ip_part + 1, common_tbl8 - 1, p);
	}
	tbl8_recycle(dp, prev, val >> 1);
}

static int
build_common_root(struct rte_trie_tbl *dp, const uint8_t *ip,
	int common_bytes, void **tbl)
{
	void *tbl_ptr = NULL;
	uint64_t *cur_tbl;
	uint64_t val;
	int i, j, idx, prev_idx = 0;

	cur_tbl = dp->tbl24;
	for (i = 3, j = 0; i <= common_bytes; i++) {
		idx = get_idx(ip, prev_idx, i - j, j);
		val = get_tbl_val_by_idx(cur_tbl, idx, dp->nh_sz);
		tbl_ptr = get_tbl_p_by_idx(cur_tbl, idx, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) != TRIE_EXT_ENT) {
			idx = tbl8_alloc(dp, val);
			if (unlikely(idx < 0))
				return idx;
			write_to_dp(tbl_ptr, (idx << 1) |
				TRIE_EXT_ENT, dp->nh_sz, 1);
			prev_idx = idx;
		} else
			prev_idx = val >> 1;

		j = i;
		cur_tbl = dp->tbl8;
	}
	*tbl = get_tbl_p_by_idx(cur_tbl, prev_idx * 256, dp->nh_sz);
	return 0;
}

enum edge {
	LEDGE,
	REDGE
};

static int
write_edge(struct rte_trie_tbl *dp, const uint8_t *ip_part, uint64_t next_hop,
	int len, enum edge edge, void *ent)
{
	uint64_t val = next_hop << 1;
	int tbl8_idx;
	int ret = 0;
	void *p;

	if (len != 0) {
		val = get_val_by_p(ent, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) == TRIE_EXT_ENT)
			tbl8_idx = val >> 1;
		else {
			tbl8_idx = tbl8_alloc(dp, val);
			if (tbl8_idx < 0)
				return tbl8_idx;
			val = (tbl8_idx << 1)|TRIE_EXT_ENT;
		}
		p = get_tbl_p_by_idx(dp->tbl8, (tbl8_idx * 256) + *ip_part,
			dp->nh_sz);
		ret = write_edge(dp, ip_part + 1, next_hop, len - 1, edge, p);
		if (ret < 0)
			return ret;
		if (edge == LEDGE) {
			write_to_dp((uint8_t *)p + (1 << dp->nh_sz),
				next_hop << 1, dp->nh_sz, UINT8_MAX - *ip_part);
		} else {
			write_to_dp(get_tbl_p_by_idx(dp->tbl8, tbl8_idx * 256,
				dp->nh_sz),
				next_hop << 1, dp->nh_sz, *ip_part);
		}
		tbl8_recycle(dp, &val, tbl8_idx);
	}

	write_to_dp(ent, val, dp->nh_sz, 1);
	return ret;
}

#define IPV6_MAX_IDX	(RTE_FIB6_IPV6_ADDR_SIZE - 1)
#define TBL24_BYTES	3
#define TBL8_LEN	(RTE_FIB6_IPV6_ADDR_SIZE - TBL24_BYTES)
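
/*
 * install_to_dp() writes one nexthop over the half-open address range
 * [ledge, redge): the caller passes the first address of the prefix and
 * the first address of the next network. For 2001:db8::/32 that is
 * [2001:db8::, 2001:db9::). The redge is first decremented so that both
 * edges can be handled inclusively, byte by byte.
 */
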
static int
install_to_dp(struct rte_trie_tbl *dp, const uint8_t *ledge, const uint8_t *r,
	uint64_t next_hop)
{
	void *common_root_tbl;
	void *ent;
	int ret;
	int i;
	int common_bytes;
	int llen, rlen;
	uint8_t redge[16];

	/* decrement redge by 1 */
	rte_rib6_copy_addr(redge, r);
	for (i = 15; i >= 0; i--) {
		redge[i]--;
		if (redge[i] != 0xff)
			break;
	}

	for (common_bytes = 0; common_bytes < 15; common_bytes++) {
		if (ledge[common_bytes] != redge[common_bytes])
			break;
	}

	ret = build_common_root(dp, ledge, common_bytes, &common_root_tbl);
	if (unlikely(ret != 0))
		return ret;
	/* first uncommon tbl8 byte idx */
	uint8_t first_tbl8_byte = RTE_MAX(common_bytes, TBL24_BYTES);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (ledge[i] != 0)
			break;
	}
	llen = i - first_tbl8_byte + (common_bytes < 3);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (redge[i] != UINT8_MAX)
			break;
	}
	rlen = i - first_tbl8_byte + (common_bytes < 3);

	/* first noncommon byte */
	uint8_t first_byte_idx = (common_bytes < 3) ? 0 : common_bytes;
	uint8_t first_idx_len = (common_bytes < 3) ? 3 : 1;

	uint32_t left_idx = get_idx(ledge, 0, first_idx_len, first_byte_idx);
	uint32_t right_idx = get_idx(redge, 0, first_idx_len, first_byte_idx);

	ent = get_tbl_p_by_idx(common_root_tbl, left_idx, dp->nh_sz);
	ret = write_edge(dp, &ledge[first_tbl8_byte + !(common_bytes < 3)],
		next_hop, llen, LEDGE, ent);
	if (ret < 0)
		return ret;

	if (right_idx > left_idx + 1) {
		ent = get_tbl_p_by_idx(common_root_tbl, left_idx + 1,
			dp->nh_sz);
		write_to_dp(ent, next_hop << 1, dp->nh_sz,
			right_idx - (left_idx + 1));
	}
	ent = get_tbl_p_by_idx(common_root_tbl, right_idx, dp->nh_sz);
	ret = write_edge(dp, &redge[first_tbl8_byte + !((common_bytes < 3))],
		next_hop, rlen, REDGE, ent);
	if (ret < 0)
		return ret;

	uint8_t common_tbl8 = (common_bytes < TBL24_BYTES) ?
		0 : common_bytes - (TBL24_BYTES - 1);
	ent = get_tbl24_p(dp, ledge, dp->nh_sz);
	recycle_root_path(dp, ledge + TBL24_BYTES, common_tbl8, ent);
	return 0;
}

static void
get_nxt_net(uint8_t *ip, uint8_t depth)
{
	int i;
	uint8_t part_depth;
	uint8_t prev_byte;

	for (i = 0, part_depth = depth; part_depth > 8; part_depth -= 8, i++)
		;

	prev_byte = ip[i];
	ip[i] += 1 << (8 - part_depth);
	if (ip[i] < prev_byte) {
		while (i > 0) {
			ip[--i] += 1;
			if (ip[i] != 0)
				break;
		}
	}
}
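
/*
 * get_nxt_net() advances ip to the first address of the next network of
 * the given depth, carrying into higher bytes on overflow. E.g. for
 * 2001:db8:: and depth 32 it yields 2001:db9::, and for depth 33 it
 * yields 2001:db8:8000:: (byte 4 is bumped by 1 << 7).
 */
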
static int
modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
	const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE],
	uint8_t depth, uint64_t next_hop)
{
	struct rte_rib6_node *tmp = NULL;
	uint8_t ledge[RTE_FIB6_IPV6_ADDR_SIZE];
	uint8_t redge[RTE_FIB6_IPV6_ADDR_SIZE];
	int ret;
	uint8_t tmp_depth;

	if (next_hop > get_max_nh(dp->nh_sz))
		return -EINVAL;

	rte_rib6_copy_addr(ledge, ip);
	do {
		tmp = rte_rib6_get_nxt(rib, ip, depth, tmp,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp != NULL) {
			rte_rib6_get_depth(tmp, &tmp_depth);
			if (tmp_depth == depth)
				continue;
			rte_rib6_get_ip(tmp, redge);
			if (rte_rib6_is_equal(ledge, redge)) {
				get_nxt_net(ledge, tmp_depth);
				continue;
			}
			ret = install_to_dp(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
			get_nxt_net(redge, tmp_depth);
			rte_rib6_copy_addr(ledge, redge);
		} else {
			rte_rib6_copy_addr(redge, ip);
			get_nxt_net(redge, depth);
			if (rte_rib6_is_equal(ledge, redge))
				break;
			ret = install_to_dp(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
		}
	} while (tmp);

	return 0;
}
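
/*
 * modify_dp() walks the covered, more specific prefixes inside ip/depth
 * and installs next_hop only into the gaps between them, so existing
 * longer matches are preserved. E.g. updating 2001:db8::/32 while
 * 2001:db8:1::/48 exists writes [2001:db8::, 2001:db8:1::) and
 * [2001:db8:2::, 2001:db9::), leaving the /48 untouched.
 */
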
static int
trie_modify(struct rte_fib6 *fib, const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE],
	uint8_t depth, uint64_t next_hop, int op)
{
	struct rte_trie_tbl *dp;
	struct rte_rib6 *rib;
	struct rte_rib6_node *tmp = NULL;
	struct rte_rib6_node *node;
	struct rte_rib6_node *parent;
	uint8_t ip_masked[RTE_FIB6_IPV6_ADDR_SIZE];
	int i, ret = 0;
	uint64_t par_nh, node_nh;
	uint8_t tmp_depth, depth_diff = 0, parent_depth = 24;

	if ((fib == NULL) || (ip == NULL) || (depth > RTE_FIB6_MAXDEPTH))
		return -EINVAL;

	dp = rte_fib6_get_dp(fib);
	RTE_ASSERT(dp);
	rib = rte_fib6_get_rib(fib);
	RTE_ASSERT(rib);

	for (i = 0; i < RTE_FIB6_IPV6_ADDR_SIZE; i++)
		ip_masked[i] = ip[i] & get_msk_part(depth, i);

	if (depth > 24) {
		tmp = rte_rib6_get_nxt(rib, ip_masked,
			RTE_ALIGN_FLOOR(depth, 8), NULL,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp == NULL) {
			tmp = rte_rib6_lookup(rib, ip);
			if (tmp != NULL) {
				rte_rib6_get_depth(tmp, &tmp_depth);
				parent_depth = RTE_MAX(tmp_depth, 24);
			}
			depth_diff = RTE_ALIGN_CEIL(depth, 8) -
				RTE_ALIGN_CEIL(parent_depth, 8);
			depth_diff = depth_diff >> 3;
		}
	}
	node = rte_rib6_lookup_exact(rib, ip_masked, depth);
	switch (op) {
	case RTE_FIB6_ADD:
		if (node != NULL) {
			rte_rib6_get_nh(node, &node_nh);
			if (node_nh == next_hop)
				return 0;
			ret = modify_dp(dp, rib, ip_masked, depth, next_hop);
			if (ret == 0)
				rte_rib6_set_nh(node, next_hop);
			return ret;
		}
		if ((depth > 24) && (dp->rsvd_tbl8s >=
				dp->number_tbl8s - depth_diff))
			return -ENOSPC;

		node = rte_rib6_insert(rib, ip_masked, depth);
		if (node == NULL)
			return -rte_errno;
		rte_rib6_set_nh(node, next_hop);
		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			if (par_nh == next_hop)
				return 0;
		}
		ret = modify_dp(dp, rib, ip_masked, depth, next_hop);
		if (ret != 0) {
			rte_rib6_remove(rib, ip_masked, depth);
			return ret;
		}
		dp->rsvd_tbl8s += depth_diff;
		return 0;
	case RTE_FIB6_DEL:
		if (node == NULL)
			return -ENOENT;

		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			rte_rib6_get_nh(node, &node_nh);
			if (par_nh != node_nh)
				ret = modify_dp(dp, rib, ip_masked, depth,
					par_nh);
		} else
			ret = modify_dp(dp, rib, ip_masked, depth, dp->def_nh);

		if (ret != 0)
			return ret;
		rte_rib6_remove(rib, ip, depth);

		dp->rsvd_tbl8s -= depth_diff;
		return 0;
	default:
		return -EINVAL;
	}
}

void *
trie_create(const char *name, int socket_id,
	struct rte_fib6_conf *conf)
{
	char mem_name[TRIE_NAMESIZE];
	struct rte_trie_tbl *dp = NULL;
	uint64_t def_nh;
	uint32_t num_tbl8;
	enum rte_fib_trie_nh_sz nh_sz;

	if ((name == NULL) || (conf == NULL) ||
			(conf->trie.nh_sz < RTE_FIB6_TRIE_2B) ||
			(conf->trie.nh_sz > RTE_FIB6_TRIE_8B) ||
			(conf->trie.num_tbl8 >
			get_max_nh(conf->trie.nh_sz)) ||
			(conf->trie.num_tbl8 == 0) ||
			(conf->default_nh >
			get_max_nh(conf->trie.nh_sz))) {
		rte_errno = EINVAL;
		return NULL;
	}

	def_nh = conf->default_nh;
	nh_sz = conf->trie.nh_sz;
	num_tbl8 = conf->trie.num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "DP_%s", name);
	dp = rte_zmalloc_socket(name, sizeof(struct rte_trie_tbl) +
		TRIE_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE,
		socket_id);
	if (dp == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	write_to_dp(&dp->tbl24, (def_nh << 1), nh_sz, 1 << 24);

	snprintf(mem_name, sizeof(mem_name), "TBL8_%p", dp);
	dp->tbl8 = rte_zmalloc_socket(mem_name, TRIE_TBL8_GRP_NUM_ENT *
			(1ll << nh_sz) * (num_tbl8 + 1),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8 == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp);
		return NULL;
	}
	dp->def_nh = def_nh;
	dp->nh_sz = nh_sz;
	dp->number_tbl8s = num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "TBL8_idxes_%p", dp);
	dp->tbl8_pool = rte_zmalloc_socket(mem_name,
			sizeof(uint32_t) * dp->number_tbl8s,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8_pool == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp->tbl8);
		rte_free(dp);
		return NULL;
	}

	tbl8_pool_init(dp);

	return dp;
}

void
trie_free(void *p)
{
	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;

	rte_free(dp->tbl8_pool);
	rte_free(dp->tbl8);
	rte_free(dp);
}