/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_rwlock.h>
#include <rte_string_fns.h>
#include <rte_tailq.h>

#include <rte_rib6.h>

#define RTE_RIB_VALID_NODE 1
#define RIB6_MAXDEPTH 128
/* Maximum length of a RIB6 name. */
#define RTE_RIB6_NAMESIZE 64

TAILQ_HEAD(rte_rib6_list, rte_tailq_entry);

static struct rte_tailq_elem rte_rib6_tailq = {
	.name = "RTE_RIB6",
};
EAL_REGISTER_TAILQ(rte_rib6_tailq)

struct rte_rib6_node {
	struct rte_rib6_node *left;
	struct rte_rib6_node *right;
	struct rte_rib6_node *parent;
	uint64_t nh;
	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE];
	uint8_t depth;
	uint8_t flag;
	__extension__ uint64_t ext[0];
};

struct rte_rib6 {
	char name[RTE_RIB6_NAMESIZE];
	struct rte_rib6_node *tree;
	struct rte_mempool *node_pool;
	uint32_t max_nodes;
};

is_valid_node(struct rte_rib6_node *node)
	return (node->flag & RTE_RIB_VALID_NODE) == RTE_RIB_VALID_NODE;

is_right_node(struct rte_rib6_node *node)
	return node->parent->right == node;

/*
 * Check if ip1 is covered by ip2/depth prefix
 */
is_covered(const uint8_t ip1[RTE_RIB6_IPV6_ADDR_SIZE],
	const uint8_t ip2[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
		if ((ip1[i] ^ ip2[i]) & get_msk_part(depth, i))

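/*
 * Note: get_msk_part() is not defined in this file; it comes from the
 * rte_rib6 public header. The block below is only an illustrative sketch
 * (an assumption, not the header's code) of the behaviour relied on here:
 * for byte number 'byte' of an IPv6 address it returns the 8-bit slice of
 * the netmask implied by a prefix length of 'depth' bits.
 */
#if 0	/* reference sketch only, not compiled */
static inline uint8_t
get_msk_part_sketch(uint8_t depth, int byte)
{
	uint8_t part;

	/* number of prefix bits that fall inside this byte, clamped to 0..8 */
	part = (depth > (byte * CHAR_BIT)) ? (depth - (byte * CHAR_BIT)) : 0;
	part = (part > CHAR_BIT) ? CHAR_BIT : part;
	/* 0xff00 >> part, truncated to 8 bits: part 0 -> 0x00, part 8 -> 0xff */
	return (uint16_t)(~UINT8_MAX) >> part;
}
#endif
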
get_dir(const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
	/*
	 * depth & 127 clamps depth to values that will not
	 * read off the end of ip.
	 * depth is the number of bits deep into ip to traverse, and
	 * is incremented in blocks of 8 (1 byte). This means the last
	 * 3 bits are irrelevant to what the index of ip should be.
	 */
	index = (depth & 127) / CHAR_BIT;

	/*
	 * msk is the bitmask used to extract the bit used to decide the
	 * direction of the next step of the binary search.
	 */
	msk = 1 << (7 - (depth & 7));

	return (ip[index] & msk) != 0;
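	/*
	 * Worked example (for illustration): with depth == 17,
	 * index = (17 & 127) / 8 = 2 and msk = 1 << (7 - 1) = 0x40, so the
	 * decision bit is the second most significant bit of ip[2], i.e.
	 * bit 17 of the address counting from 0 at the MSB, which is the
	 * first bit after a /17 prefix.
	 */
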
static inline struct rte_rib6_node *
get_nxt_node(struct rte_rib6_node *node,
	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
{
	if (node->depth == RIB6_MAXDEPTH)
		return NULL;

	return (get_dir(ip, node->depth)) ? node->right : node->left;
}

static struct rte_rib6_node *
node_alloc(struct rte_rib6 *rib)
	struct rte_rib6_node *ent;

	ret = rte_mempool_get(rib->node_pool, (void *)&ent);
	if (unlikely(ret != 0))

node_free(struct rte_rib6 *rib, struct rte_rib6_node *ent)
	rte_mempool_put(rib->node_pool, ent);

struct rte_rib6_node *
rte_rib6_lookup(struct rte_rib6 *rib,
	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
	struct rte_rib6_node *cur;
	struct rte_rib6_node *prev = NULL;

	if (unlikely(rib == NULL)) {

	while ((cur != NULL) && is_covered(ip, cur->ip, cur->depth)) {
		if (is_valid_node(cur))
		cur = get_nxt_node(cur, ip);

struct rte_rib6_node *
rte_rib6_lookup_parent(struct rte_rib6_node *ent)
	struct rte_rib6_node *tmp;

	while ((tmp != NULL) && (!is_valid_node(tmp)))

struct rte_rib6_node *
rte_rib6_lookup_exact(struct rte_rib6 *rib,
	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
	struct rte_rib6_node *cur;
	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];

	if ((rib == NULL) || (ip == NULL) || (depth > RIB6_MAXDEPTH)) {

	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
		tmp_ip[i] = ip[i] & get_msk_part(depth, i);

	while (cur != NULL) {
		if (rte_rib6_is_equal(cur->ip, tmp_ip) &&
				(cur->depth == depth) &&
		if (!(is_covered(tmp_ip, cur->ip, cur->depth)) ||
				(cur->depth >= depth))
		cur = get_nxt_node(cur, tmp_ip);

/*
 * Traverses a subtree and retrieves more specific routes
 * for the ip/depth prefix given in the arguments.
 * last == NULL means the first invocation
 * (a usage sketch follows the function body below).
 */
struct rte_rib6_node *
rte_rib6_get_nxt(struct rte_rib6 *rib,
	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE],
	uint8_t depth, struct rte_rib6_node *last, int flag)
	struct rte_rib6_node *tmp, *prev = NULL;
	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];

	if ((rib == NULL) || (ip == NULL) || (depth > RIB6_MAXDEPTH)) {

	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
		tmp_ip[i] = ip[i] & get_msk_part(depth, i);

	while ((tmp) && (tmp->depth < depth))
		tmp = get_nxt_node(tmp, tmp_ip);

	while ((tmp->parent != NULL) && (is_right_node(tmp) ||
			(tmp->parent->right == NULL))) {
		if (is_valid_node(tmp) &&
				(is_covered(tmp->ip, tmp_ip, depth) &&
				(tmp->depth > depth)))
		tmp = (tmp->parent != NULL) ? tmp->parent->right : NULL;

	if (is_valid_node(tmp) &&
			(is_covered(tmp->ip, tmp_ip, depth) &&
			(tmp->depth > depth))) {
		if (flag == RTE_RIB6_GET_NXT_COVER)

	tmp = (tmp->left != NULL) ? tmp->left : tmp->right;

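/*
 * Usage sketch (illustrative only, not part of the library): walk every
 * route more specific than 2001:db8::/32. The prefix bytes and the helper
 * name below are made up for the example; RTE_RIB6_GET_NXT_ALL and the
 * iteration protocol of rte_rib6_get_nxt() (last == NULL on the first
 * call, then pass the previously returned node) are as used in this file.
 */
#if 0	/* example, not compiled */
static void
example_walk_more_specific(struct rte_rib6 *rib)
{
	const uint8_t prefix[RTE_RIB6_IPV6_ADDR_SIZE] = {
		0x20, 0x01, 0x0d, 0xb8, /* 2001:db8::/32, remaining bytes 0 */
	};
	struct rte_rib6_node *node = NULL;
	uint8_t route_ip[RTE_RIB6_IPV6_ADDR_SIZE];
	uint8_t route_depth;
	uint64_t nh;

	while ((node = rte_rib6_get_nxt(rib, prefix, 32, node,
			RTE_RIB6_GET_NXT_ALL)) != NULL) {
		rte_rib6_get_ip(node, route_ip);
		rte_rib6_get_depth(node, &route_depth);
		rte_rib6_get_nh(node, &nh);
		/* route_ip/route_depth and nh now describe one route */
	}
}
#endif
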
rte_rib6_remove(struct rte_rib6 *rib,
	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
	struct rte_rib6_node *cur, *prev, *child;

	cur = rte_rib6_lookup_exact(rib, ip, depth);

	cur->flag &= ~RTE_RIB_VALID_NODE;
	while (!is_valid_node(cur)) {
		if ((cur->left != NULL) && (cur->right != NULL))
		child = (cur->left == NULL) ? cur->right : cur->left;
		child->parent = cur->parent;
		if (cur->parent == NULL) {
		if (cur->parent->left == cur)
			cur->parent->left = child;
			cur->parent->right = child;
		node_free(rib, prev);

struct rte_rib6_node *
rte_rib6_insert(struct rte_rib6 *rib,
	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
	struct rte_rib6_node **tmp;
	struct rte_rib6_node *prev = NULL;
	struct rte_rib6_node *new_node = NULL;
	struct rte_rib6_node *common_node = NULL;
	uint8_t common_prefix[RTE_RIB6_IPV6_ADDR_SIZE];
	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
	uint8_t common_depth, ip_xor;

	if (unlikely((rib == NULL) || (ip == NULL) ||
			(depth > RIB6_MAXDEPTH))) {

	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
		tmp_ip[i] = ip[i] & get_msk_part(depth, i);

	new_node = rte_rib6_lookup_exact(rib, tmp_ip, depth);
	if (new_node != NULL) {

	new_node = node_alloc(rib);
	if (new_node == NULL) {

	new_node->left = NULL;
	new_node->right = NULL;
	new_node->parent = NULL;
	rte_rib6_copy_addr(new_node->ip, tmp_ip);
	new_node->depth = depth;
	new_node->flag = RTE_RIB_VALID_NODE;

	/* traverse down the tree to find a matching node or the closest match */
		/* insert as the last node in the branch */
			new_node->parent = prev;

		/*
		 * Intermediate node found.
		 * Previous rte_rib6_lookup_exact() returned NULL
		 * but a node matching the search criteria was found.
		 * Validate the intermediate node and return.
		 */
		if (rte_rib6_is_equal(tmp_ip, (*tmp)->ip) &&
				(depth == (*tmp)->depth)) {
			node_free(rib, new_node);
			(*tmp)->flag |= RTE_RIB_VALID_NODE;

		if (!is_covered(tmp_ip, (*tmp)->ip, (*tmp)->depth) ||
				((*tmp)->depth >= depth)) {

		tmp = (get_dir(tmp_ip, (*tmp)->depth)) ? &(*tmp)->right :
				&(*tmp)->left;

	/* closest node found, new_node should be inserted in the middle */
	common_depth = RTE_MIN(depth, (*tmp)->depth);
	for (i = 0, d = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++) {
		ip_xor = tmp_ip[i] ^ (*tmp)->ip[i];
			d += __builtin_clz(ip_xor << 24);

	common_depth = RTE_MIN(d, common_depth);

	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
		common_prefix[i] = tmp_ip[i] & get_msk_part(common_depth, i);

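	/*
	 * Worked example (for illustration): inserting 2001:db8::/32 while
	 * 2001:db9::/32 is already in the tree. Bytes 0-2 match (d = 24);
	 * byte 3 gives ip_xor = 0xb8 ^ 0xb9 = 0x01, and
	 * __builtin_clz(0x01 << 24) = 7, so d = 31. common_depth becomes
	 * RTE_MIN(31, 32) = 31 and common_prefix is 2001:db8::/31, the node
	 * under which both /32 routes will hang.
	 */
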
	if (rte_rib6_is_equal(common_prefix, tmp_ip) &&
			(common_depth == depth)) {
		/* insert as a parent */
		if (get_dir((*tmp)->ip, depth))
			new_node->right = *tmp;
		else
			new_node->left = *tmp;
		new_node->parent = (*tmp)->parent;
		(*tmp)->parent = new_node;

	/* create intermediate node */
	common_node = node_alloc(rib);
	if (common_node == NULL) {
		node_free(rib, new_node);

	rte_rib6_copy_addr(common_node->ip, common_prefix);
	common_node->depth = common_depth;
	common_node->flag = 0;
	common_node->parent = (*tmp)->parent;
	new_node->parent = common_node;
	(*tmp)->parent = common_node;
	if (get_dir((*tmp)->ip, common_depth) == 1) {
		common_node->left = new_node;
		common_node->right = *tmp;
	} else {
		common_node->left = *tmp;
		common_node->right = new_node;
	}

rte_rib6_get_ip(const struct rte_rib6_node *node,
	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
	if ((node == NULL) || (ip == NULL)) {

	rte_rib6_copy_addr(ip, node->ip);

rte_rib6_get_depth(const struct rte_rib6_node *node, uint8_t *depth)
	if ((node == NULL) || (depth == NULL)) {

	*depth = node->depth;

rte_rib6_get_ext(struct rte_rib6_node *node)
	return (node == NULL) ? NULL : &node->ext[0];

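/*
 * Usage sketch (illustrative only): per-node extension storage. The struct
 * name and field below are hypothetical; the mechanism itself is the one in
 * this file: rte_rib6_create() sizes each node with conf->ext_sz extra bytes
 * and rte_rib6_get_ext() returns a pointer to that area.
 */
#if 0	/* example, not compiled */
struct my_route_data {	/* hypothetical user data, sized via conf->ext_sz */
	uint32_t metric;
};

static void
example_use_ext(struct rte_rib6_node *node)
{
	struct my_route_data *data;

	data = (struct my_route_data *)rte_rib6_get_ext(node);
	if (data != NULL)
		data->metric = 100;
}
#endif
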
rte_rib6_get_nh(const struct rte_rib6_node *node, uint64_t *nh)
	if ((node == NULL) || (nh == NULL)) {

rte_rib6_set_nh(struct rte_rib6_node *node, uint64_t nh)

rte_rib6_create(const char *name, int socket_id,
	const struct rte_rib6_conf *conf)
	char mem_name[RTE_RIB6_NAMESIZE];
	struct rte_rib6 *rib = NULL;
	struct rte_tailq_entry *te;
	struct rte_rib6_list *rib6_list;
	struct rte_mempool *node_pool;

	/* Check user arguments. */
	if (name == NULL || conf == NULL || conf->max_nodes <= 0) {

	snprintf(mem_name, sizeof(mem_name), "MP_%s", name);
	node_pool = rte_mempool_create(mem_name, conf->max_nodes,
		sizeof(struct rte_rib6_node) + conf->ext_sz, 0, 0,
		NULL, NULL, NULL, NULL, socket_id, 0);

	if (node_pool == NULL) {
		RTE_LOG(ERR, LPM,
			"Can not allocate mempool for RIB6 %s\n", name);

	snprintf(mem_name, sizeof(mem_name), "RIB6_%s", name);
	rib6_list = RTE_TAILQ_CAST(rte_rib6_tailq.head, rte_rib6_list);

	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing entry with the same name */
	TAILQ_FOREACH(te, rib6_list, next) {
		rib = (struct rte_rib6 *)te->data;
		if (strncmp(name, rib->name, RTE_RIB6_NAMESIZE) == 0)

	/* allocate tailq entry */
	te = rte_zmalloc("RIB6_TAILQ_ENTRY", sizeof(*te), 0);
		RTE_LOG(ERR, LPM,
			"Can not allocate tailq entry for RIB6 %s\n", name);

	/* Allocate memory to store the RIB6 data structures. */
	rib = rte_zmalloc_socket(mem_name,
		sizeof(struct rte_rib6), RTE_CACHE_LINE_SIZE, socket_id);
		RTE_LOG(ERR, LPM, "RIB6 %s memory allocation failed\n", name);

	rte_strlcpy(rib->name, name, sizeof(rib->name));
	rib->max_nodes = conf->max_nodes;
	rib->node_pool = node_pool;

	te->data = (void *)rib;
	TAILQ_INSERT_TAIL(rib6_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_mcfg_tailq_write_unlock();
	rte_mempool_free(node_pool);

rte_rib6_find_existing(const char *name)
	struct rte_rib6 *rib = NULL;
	struct rte_tailq_entry *te;
	struct rte_rib6_list *rib6_list;

	if (unlikely(name == NULL)) {

	rib6_list = RTE_TAILQ_CAST(rte_rib6_tailq.head, rte_rib6_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, rib6_list, next) {
		rib = (struct rte_rib6 *) te->data;
		if (strncmp(name, rib->name, RTE_RIB6_NAMESIZE) == 0)
	rte_mcfg_tailq_read_unlock();

rte_rib6_free(struct rte_rib6 *rib)
	struct rte_tailq_entry *te;
	struct rte_rib6_list *rib6_list;
	struct rte_rib6_node *tmp = NULL;

	if (unlikely(rib == NULL)) {

	rib6_list = RTE_TAILQ_CAST(rte_rib6_tailq.head, rte_rib6_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, rib6_list, next) {
		if (te->data == (void *)rib)

	TAILQ_REMOVE(rib6_list, te, next);

	rte_mcfg_tailq_write_unlock();

	while ((tmp = rte_rib6_get_nxt(rib, 0, 0, tmp,
			RTE_RIB6_GET_NXT_ALL)) != NULL)
		rte_rib6_remove(rib, tmp->ip, tmp->depth);

	rte_mempool_free(rib->node_pool);
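
/*
 * End-to-end usage sketch (illustrative only, not part of the library):
 * create a RIB6, add a route with a next hop, look it up and tear the table
 * down. The prefix bytes, next-hop value and configuration numbers are made
 * up for the example; struct rte_rib6_conf is assumed to carry at least the
 * max_nodes and ext_sz fields that rte_rib6_create() reads above.
 */
#if 0	/* example, not compiled */
static int
example_rib6_lifecycle(void)
{
	struct rte_rib6_conf conf = {
		.max_nodes = 1 << 16,
		.ext_sz = 0,
	};
	const uint8_t prefix[RTE_RIB6_IPV6_ADDR_SIZE] = {
		0x20, 0x01, 0x0d, 0xb8, /* 2001:db8::/32, remaining bytes 0 */
	};
	struct rte_rib6 *rib;
	struct rte_rib6_node *node;
	uint64_t nh = 0;

	rib = rte_rib6_create("example_rib6", SOCKET_ID_ANY, &conf);
	if (rib == NULL)
		return -1;

	/* insert 2001:db8::/32 and attach next-hop id 42 to it */
	node = rte_rib6_insert(rib, prefix, 32);
	if (node != NULL)
		rte_rib6_set_nh(node, 42);

	/* longest-prefix lookup of an address covered by that prefix */
	node = rte_rib6_lookup(rib, prefix);
	if (node != NULL)
		rte_rib6_get_nh(node, &nh);	/* nh is 42 here */

	rte_rib6_remove(rib, prefix, 32);
	rte_rib6_free(rib);
	return 0;
}
#endif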