1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
3 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_malloc.h>
14 #include <rte_errno.h>
15 #include <rte_memory.h>
/* Max length of internal table/memzone names built with snprintf() below. */
21 #define DIR24_8_NAMESIZE 64
/*
 * Round address x up to the next boundary of a prefix of length y,
 * i.e. align to 2^(32 - y) bytes of IPv4 address space.
 */
23 #define ROUNDUP(x, y) RTE_ALIGN_CEIL(x, (1 << (32 - y)))
/*
 * Map the table's next-hop entry size to the matching scalar
 * (macro-generated) bulk lookup routine.
 * NOTE(review): the opening brace, switch header and default branch are
 * elided in this excerpt — confirm against the full file.
 */
25 static inline rte_fib_lookup_fn_t
26 get_scalar_fn(enum rte_fib_dir24_8_nh_sz nh_sz)
29 case RTE_FIB_DIR24_8_1B:
30 return dir24_8_lookup_bulk_1b;
31 case RTE_FIB_DIR24_8_2B:
32 return dir24_8_lookup_bulk_2b;
33 case RTE_FIB_DIR24_8_4B:
34 return dir24_8_lookup_bulk_4b;
35 case RTE_FIB_DIR24_8_8B:
36 return dir24_8_lookup_bulk_8b;
/*
 * Same mapping as get_scalar_fn() but returning the inlined lookup
 * variants; the numeric suffix here is log2 of the entry size in bytes.
 * NOTE(review): switch header and default branch are elided in this
 * excerpt.
 */
42 static inline rte_fib_lookup_fn_t
43 get_scalar_fn_inlined(enum rte_fib_dir24_8_nh_sz nh_sz)
46 case RTE_FIB_DIR24_8_1B:
47 return dir24_8_lookup_bulk_0;
48 case RTE_FIB_DIR24_8_2B:
49 return dir24_8_lookup_bulk_1;
50 case RTE_FIB_DIR24_8_4B:
51 return dir24_8_lookup_bulk_2;
52 case RTE_FIB_DIR24_8_8B:
53 return dir24_8_lookup_bulk_3;
/*
 * Select a bulk lookup implementation for the dir24_8 data plane "p"
 * according to the requested lookup type; dispatches on the table's
 * next-hop entry size where the implementation depends on it.
 * NOTE(review): return type, nh_sz initialization and the remaining
 * switch cases/default are elided in this excerpt.
 */
60 dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type)
62 enum rte_fib_dir24_8_nh_sz nh_sz;
63 struct dir24_8_tbl *dp = p;
71 case RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO:
72 return get_scalar_fn(nh_sz);
73 case RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE:
74 return get_scalar_fn_inlined(nh_sz);
75 case RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI:
76 return dir24_8_lookup_bulk_uni;
/*
 * Store "val" (truncated to the entry width) into "n" consecutive FIB
 * entries starting at "ptr". "size" selects the per-entry width
 * (1/2/4/8 bytes); one aliasing pointer per width is prepared up front.
 * NOTE(review): the switch header and the break statements between the
 * cases are elided in this excerpt.
 */
85 write_to_fib(void *ptr, uint64_t val, enum rte_fib_dir24_8_nh_sz size, int n)
88 uint8_t *ptr8 = (uint8_t *)ptr;
89 uint16_t *ptr16 = (uint16_t *)ptr;
90 uint32_t *ptr32 = (uint32_t *)ptr;
91 uint64_t *ptr64 = (uint64_t *)ptr;
94 case RTE_FIB_DIR24_8_1B:
95 for (i = 0; i < n; i++)
96 ptr8[i] = (uint8_t)val;
98 case RTE_FIB_DIR24_8_2B:
99 for (i = 0; i < n; i++)
100 ptr16[i] = (uint16_t)val;
102 case RTE_FIB_DIR24_8_4B:
103 for (i = 0; i < n; i++)
104 ptr32[i] = (uint32_t)val;
106 case RTE_FIB_DIR24_8_8B:
107 for (i = 0; i < n; i++)
108 ptr64[i] = (uint64_t)val;
/*
 * Allocate a tbl8 group index: scan the 64-bit slabs of the allocation
 * bitmap for one with a clear bit, set the lowest clear bit and return
 * the corresponding global group index.
 * NOTE(review): the failure-path return (all slabs full) is elided in
 * this excerpt.
 */
114 tbl8_get_idx(struct dir24_8_tbl *dp)
/* Skip fully-occupied slabs (all bits set). */
119 for (i = 0; (i < (dp->number_tbl8s >> BITMAP_SLAB_BIT_SIZE_LOG2)) &&
120 (dp->tbl8_idxes[i] == UINT64_MAX); i++)
122 if (i < (dp->number_tbl8s >> BITMAP_SLAB_BIT_SIZE_LOG2)) {
/* Lowest clear bit of the slab = first free group within it. */
123 bit_idx = __builtin_ctzll(~dp->tbl8_idxes[i]);
124 dp->tbl8_idxes[i] |= (1ULL << bit_idx);
125 return (i << BITMAP_SLAB_BIT_SIZE_LOG2) + bit_idx;
/* Mark tbl8 group "idx" as free by clearing its bit in the bitmap. */
131 tbl8_free_idx(struct dir24_8_tbl *dp, int idx)
133 dp->tbl8_idxes[idx >> BITMAP_SLAB_BIT_SIZE_LOG2] &=
134 ~(1ULL << (idx & BITMAP_SLAB_BITMASK));
/*
 * Allocate a tbl8 group and fill all of its entries with next hop "nh"
 * inherited from the covering tbl24 entry, tagged with DIR24_8_EXT_ENT.
 * NOTE(review): the error check on tbl8_get_idx() and the return of
 * tbl8_idx are elided in this excerpt.
 */
138 tbl8_alloc(struct dir24_8_tbl *dp, uint64_t nh)
143 tbl8_idx = tbl8_get_idx(dp);
/* Byte offset of the group: index * entries-per-group * entry size. */
146 tbl8_ptr = (uint8_t *)dp->tbl8 +
147 ((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) <<
149 /*Init tbl8 entries with nexthop from tbl24*/
150 write_to_fib((void *)tbl8_ptr, nh|
151 DIR24_8_EXT_ENT, dp->nh_sz,
152 DIR24_8_TBL8_GRP_NUM_ENT);
/*
 * Try to collapse a tbl8 group back into its covering tbl24 entry: if
 * every entry of group "tbl8_idx" (reached via "ip") holds the same
 * next hop, write that value (with the EXT flag cleared) into the tbl24
 * entry, reset the group and release its index. One case per entry
 * width.
 * NOTE(review): the switch header, the per-entry equality checks and the
 * entry-reset statements inside the second loops are elided in this
 * excerpt — each case appears to follow the same compare-then-collapse
 * pattern.
 */
158 tbl8_recycle(struct dir24_8_tbl *dp, uint32_t ip, uint64_t tbl8_idx)
168 case RTE_FIB_DIR24_8_1B:
169 ptr8 = &((uint8_t *)dp->tbl8)[tbl8_idx *
170 DIR24_8_TBL8_GRP_NUM_ENT];
172 for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
176 ((uint8_t *)dp->tbl24)[ip >> 8] =
177 nh & ~DIR24_8_EXT_ENT;
178 for (i = 0; i < DIR24_8_TBL8_GRP_NUM_ENT; i++)
181 case RTE_FIB_DIR24_8_2B:
182 ptr16 = &((uint16_t *)dp->tbl8)[tbl8_idx *
183 DIR24_8_TBL8_GRP_NUM_ENT];
185 for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
189 ((uint16_t *)dp->tbl24)[ip >> 8] =
190 nh & ~DIR24_8_EXT_ENT;
191 for (i = 0; i < DIR24_8_TBL8_GRP_NUM_ENT; i++)
194 case RTE_FIB_DIR24_8_4B:
195 ptr32 = &((uint32_t *)dp->tbl8)[tbl8_idx *
196 DIR24_8_TBL8_GRP_NUM_ENT];
198 for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
202 ((uint32_t *)dp->tbl24)[ip >> 8] =
203 nh & ~DIR24_8_EXT_ENT;
204 for (i = 0; i < DIR24_8_TBL8_GRP_NUM_ENT; i++)
207 case RTE_FIB_DIR24_8_8B:
208 ptr64 = &((uint64_t *)dp->tbl8)[tbl8_idx *
209 DIR24_8_TBL8_GRP_NUM_ENT];
211 for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
215 ((uint64_t *)dp->tbl24)[ip >> 8] =
216 nh & ~DIR24_8_EXT_ENT;
217 for (i = 0; i < DIR24_8_TBL8_GRP_NUM_ENT; i++)
/* All entries matched: return the group to the free pool. */
221 tbl8_free_idx(dp, tbl8_idx);
/*
 * Write "next_hop" into every FIB entry covering the address range
 * [ledge, redge). Whole /24-aligned spans are written directly into
 * tbl24; a partial span at either edge is expanded into a tbl8 group
 * (allocated here if the tbl24 entry is not already extended, recycled
 * afterwards if it became uniform). Next hops are stored shifted left
 * by one bit, the LSB being the DIR24_8_EXT_ENT flag.
 * NOTE(review): several error checks, the DIR24_8_EXT_ENT comparisons
 * and some write_to_fib() argument lines are elided in this excerpt.
 */
226 install_to_fib(struct dir24_8_tbl *dp, uint32_t ledge, uint32_t redge,
/* ledge == redge == 0 encodes the full 0/0 range: all 2^24 tbl24 entries. */
235 len = ((ledge == 0) && (redge == 0)) ? 1 << 24 :
236 ((redge & DIR24_8_TBL24_MASK) - ROUNDUP(ledge, 24)) >> 8;
238 if (((ledge >> 8) != (redge >> 8)) || (len == 1 << 24)) {
/* Partial /24 at the left edge needs a tbl8 group. */
239 if ((ROUNDUP(ledge, 24) - ledge) != 0) {
240 tbl24_tmp = get_tbl24(dp, ledge, dp->nh_sz);
241 if ((tbl24_tmp & DIR24_8_EXT_ENT) !=
244 * Make sure there is space for two TBL8.
245 * This is necessary when installing range that
246 * needs tbl8 for ledge and redge.
248 tbl8_idx = tbl8_alloc(dp, tbl24_tmp);
/* Reserve a second group up front so the redge alloc cannot fail later. */
249 tmp_tbl8_idx = tbl8_get_idx(dp);
252 else if (tmp_tbl8_idx < 0) {
253 tbl8_free_idx(dp, tbl8_idx);
/* Only the reservation was needed; return the spare group. */
256 tbl8_free_idx(dp, tmp_tbl8_idx);
257 /*update dir24 entry with tbl8 index*/
258 write_to_fib(get_tbl24_p(dp, ledge,
259 dp->nh_sz), (tbl8_idx << 1)|
/* tbl24 entry already extended: reuse its existing tbl8 group. */
263 tbl8_idx = tbl24_tmp >> 1;
264 tbl8_ptr = (uint8_t *)dp->tbl8 +
265 (((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) +
266 (ledge & ~DIR24_8_TBL24_MASK)) <<
268 /*update tbl8 with new next hop*/
269 write_to_fib((void *)tbl8_ptr, (next_hop << 1)|
271 dp->nh_sz, ROUNDUP(ledge, 24) - ledge);
272 tbl8_recycle(dp, ledge, tbl8_idx);
/* Bulk-write the aligned middle section straight into tbl24. */
274 write_to_fib(get_tbl24_p(dp, ROUNDUP(ledge, 24), dp->nh_sz),
275 next_hop << 1, dp->nh_sz, len);
/* Partial /24 at the right edge needs a tbl8 group too. */
276 if (redge & ~DIR24_8_TBL24_MASK) {
277 tbl24_tmp = get_tbl24(dp, redge, dp->nh_sz);
278 if ((tbl24_tmp & DIR24_8_EXT_ENT) !=
280 tbl8_idx = tbl8_alloc(dp, tbl24_tmp);
283 /*update dir24 entry with tbl8 index*/
284 write_to_fib(get_tbl24_p(dp, redge,
285 dp->nh_sz), (tbl8_idx << 1)|
289 tbl8_idx = tbl24_tmp >> 1;
290 tbl8_ptr = (uint8_t *)dp->tbl8 +
291 ((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) <<
293 /*update tbl8 with new next hop*/
294 write_to_fib((void *)tbl8_ptr, (next_hop << 1)|
296 dp->nh_sz, redge & ~DIR24_8_TBL24_MASK);
297 tbl8_recycle(dp, redge, tbl8_idx);
/* Range lies entirely within one /24: a single tbl8 group covers it. */
299 } else if ((redge - ledge) != 0) {
300 tbl24_tmp = get_tbl24(dp, ledge, dp->nh_sz);
301 if ((tbl24_tmp & DIR24_8_EXT_ENT) !=
303 tbl8_idx = tbl8_alloc(dp, tbl24_tmp);
306 /*update dir24 entry with tbl8 index*/
307 write_to_fib(get_tbl24_p(dp, ledge, dp->nh_sz),
312 tbl8_idx = tbl24_tmp >> 1;
313 tbl8_ptr = (uint8_t *)dp->tbl8 +
314 (((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) +
315 (ledge & ~DIR24_8_TBL24_MASK)) <<
317 /*update tbl8 with new next hop*/
318 write_to_fib((void *)tbl8_ptr, (next_hop << 1)|
320 dp->nh_sz, redge - ledge);
321 tbl8_recycle(dp, ledge, tbl8_idx);
/*
 * Recompute the data-plane entries covered by prefix ip/depth. The RIB
 * is walked for more-specific routes (RTE_RIB_GET_NXT_COVER) so their
 * address ranges are skipped; only the gaps between them are installed
 * with "next_hop" via install_to_fib().
 * NOTE(review): loop structure, ledge initialization and several
 * error/return paths are elided in this excerpt.
 */
327 modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip,
328 uint8_t depth, uint64_t next_hop)
330 struct rte_rib_node *tmp = NULL;
331 uint32_t ledge, redge, tmp_ip;
337 tmp = rte_rib_get_nxt(rib, ip, depth, tmp,
338 RTE_RIB_GET_NXT_COVER);
340 rte_rib_get_depth(tmp, &tmp_depth);
/* A node at the same depth is the prefix itself, not a more-specific. */
341 if (tmp_depth == depth)
343 rte_rib_get_ip(tmp, &tmp_ip);
344 redge = tmp_ip & rte_rib_depth_to_mask(tmp_depth);
/* Empty gap before this more-specific: just advance past it. */
345 if (ledge == redge) {
347 (uint32_t)(1ULL << (32 - tmp_depth));
350 ret = install_to_fib(dp, ledge, redge,
355 (uint32_t)(1ULL << (32 - tmp_depth));
/* Final gap: from the last more-specific up to the end of the prefix. */
357 redge = ip + (uint32_t)(1ULL << (32 - depth));
360 ret = install_to_fib(dp, ledge, redge,
/*
 * Add or delete route ip/depth with "next_hop" in both the RIB control
 * plane and the dir24_8 data plane of "fib". Validates arguments and
 * the next-hop range, then dispatches on "op".
 * NOTE(review): the op switch, several returns and error paths are
 * elided in this excerpt.
 */
371 dir24_8_modify(struct rte_fib *fib, uint32_t ip, uint8_t depth,
372 uint64_t next_hop, int op)
374 struct dir24_8_tbl *dp;
376 struct rte_rib_node *tmp = NULL;
377 struct rte_rib_node *node;
378 struct rte_rib_node *parent;
380 uint64_t par_nh, node_nh;
382 if ((fib == NULL) || (depth > RTE_FIB_MAXDEPTH))
385 dp = rte_fib_get_dp(fib);
386 rib = rte_fib_get_rib(fib);
387 RTE_ASSERT((dp != NULL) && (rib != NULL));
/* Next hop must fit in the configured entry width (minus the EXT bit). */
389 if (next_hop > get_max_nh(dp->nh_sz))
/* Mask off host bits so the prefix is canonical. */
392 ip &= rte_rib_depth_to_mask(depth);
394 node = rte_rib_lookup_exact(rib, ip, depth);
/* Route already present: only rewrite if the next hop changed. */
398 rte_rib_get_nh(node, &node_nh);
399 if (node_nh == next_hop)
401 ret = modify_fib(dp, rib, ip, depth, next_hop);
403 rte_rib_set_nh(node, next_hop);
/* Deep prefixes (>24) need a tbl8 group; check the reserve budget. */
407 tmp = rte_rib_get_nxt(rib, ip, 24, NULL,
408 RTE_RIB_GET_NXT_COVER);
410 (dp->rsvd_tbl8s >= dp->number_tbl8s))
414 node = rte_rib_insert(rib, ip, depth);
417 rte_rib_set_nh(node, next_hop);
/* If the covering route already has this next hop, the data plane
 * is unchanged; otherwise rebuild the covered range. */
418 parent = rte_rib_lookup_parent(node);
419 if (parent != NULL) {
420 rte_rib_get_nh(parent, &par_nh);
421 if (par_nh == next_hop)
424 ret = modify_fib(dp, rib, ip, depth, next_hop);
/* Data-plane update failed: roll back the RIB insertion. */
426 rte_rib_remove(rib, ip, depth);
429 if ((depth > 24) && (tmp == NULL))
/* Delete: repaint the range with the parent's next hop, or the
 * default next hop when no covering route exists. */
436 parent = rte_rib_lookup_parent(node);
437 if (parent != NULL) {
438 rte_rib_get_nh(parent, &par_nh);
439 rte_rib_get_nh(node, &node_nh);
440 if (par_nh != node_nh)
441 ret = modify_fib(dp, rib, ip, depth, par_nh);
443 ret = modify_fib(dp, rib, ip, depth, dp->def_nh);
445 rte_rib_remove(rib, ip, depth);
447 tmp = rte_rib_get_nxt(rib, ip, 24, NULL,
448 RTE_RIB_GET_NXT_COVER);
/*
 * Allocate and initialize a dir24_8 data-plane table on "socket_id":
 * validate the configuration, allocate the dp struct together with the
 * 2^24-entry tbl24, then the tbl8 group array and its allocation
 * bitmap, and fill tbl24 with the default next hop.
 * NOTE(review): error-path cleanup (rte_free + rte_errno) and the final
 * return are elided in this excerpt.
 */
461 dir24_8_create(const char *name, int socket_id, struct rte_fib_conf *fib_conf)
463 char mem_name[DIR24_8_NAMESIZE];
464 struct dir24_8_tbl *dp;
467 enum rte_fib_dir24_8_nh_sz nh_sz;
/* Reject invalid entry sizes, tbl8 counts and out-of-range default nh. */
469 if ((name == NULL) || (fib_conf == NULL) ||
470 (fib_conf->dir24_8.nh_sz < RTE_FIB_DIR24_8_1B) ||
471 (fib_conf->dir24_8.nh_sz > RTE_FIB_DIR24_8_8B) ||
472 (fib_conf->dir24_8.num_tbl8 >
473 get_max_nh(fib_conf->dir24_8.nh_sz)) ||
474 (fib_conf->dir24_8.num_tbl8 == 0) ||
475 (fib_conf->default_nh >
476 get_max_nh(fib_conf->dir24_8.nh_sz))) {
481 def_nh = fib_conf->default_nh;
482 nh_sz = fib_conf->dir24_8.nh_sz;
/* Round the tbl8 count up to a whole number of bitmap slabs. */
483 num_tbl8 = RTE_ALIGN_CEIL(fib_conf->dir24_8.num_tbl8,
484 BITMAP_SLAB_BIT_SIZE);
486 snprintf(mem_name, sizeof(mem_name), "DP_%s", name);
/* dp struct and tbl24 share one allocation; entry size is 2^nh_sz bytes. */
487 dp = rte_zmalloc_socket(name, sizeof(struct dir24_8_tbl) +
488 DIR24_8_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE,
495 /* Init table with default value */
496 write_to_fib(dp->tbl24, (def_nh << 1), nh_sz, 1 << 24);
498 snprintf(mem_name, sizeof(mem_name), "TBL8_%p", dp);
499 uint64_t tbl8_sz = DIR24_8_TBL8_GRP_NUM_ENT * (1ULL << nh_sz) *
501 dp->tbl8 = rte_zmalloc_socket(mem_name, tbl8_sz,
502 RTE_CACHE_LINE_SIZE, socket_id);
503 if (dp->tbl8 == NULL) {
510 dp->number_tbl8s = num_tbl8;
512 snprintf(mem_name, sizeof(mem_name), "TBL8_idxes_%p", dp);
/* One bit per tbl8 group, rounded up to whole 64-bit words (>> 3: bytes). */
513 dp->tbl8_idxes = rte_zmalloc_socket(mem_name,
514 RTE_ALIGN_CEIL(dp->number_tbl8s, 64) >> 3,
515 RTE_CACHE_LINE_SIZE, socket_id);
516 if (dp->tbl8_idxes == NULL) {
/*
 * Release the memory owned by the dir24_8 data plane "p".
 * NOTE(review): the function continues past this excerpt (presumably
 * freeing tbl8 and dp as well — confirm against the full file).
 */
527 dir24_8_free(void *p)
529 struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;
531 rte_free(dp->tbl8_idxes);