lib/librte_lpm/rte_lpm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  * Copyright(c) 2020 Arm Limited
4  */
5
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <stdarg.h>
10 #include <stdio.h>
11 #include <sys/queue.h>
12
13 #include <rte_log.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_common.h>
16 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
17 #include <rte_malloc.h>
18 #include <rte_eal.h>
19 #include <rte_eal_memconfig.h>
20 #include <rte_per_lcore.h>
21 #include <rte_string_fns.h>
22 #include <rte_errno.h>
23 #include <rte_rwlock.h>
24 #include <rte_spinlock.h>
25 #include <rte_tailq.h>
26
27 #include "rte_lpm.h"
28
29 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
30
31 static struct rte_tailq_elem rte_lpm_tailq = {
32         .name = "RTE_LPM",
33 };
34 EAL_REGISTER_TAILQ(rte_lpm_tailq)
35
36 #define MAX_DEPTH_TBL24 24
37
38 enum valid_flag {
39         INVALID = 0,
40         VALID
41 };
42
43 /** @internal LPM structure. */
44 struct __rte_lpm {
45         /* LPM metadata. */
46         struct rte_lpm lpm;
47
48         /* RCU config. */
49         struct rte_rcu_qsbr *v;         /* RCU QSBR variable. */
50         enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
51         struct rte_rcu_qsbr_dq *dq;     /* RCU QSBR defer queue. */
52 };
53
54 /* Macro to enable/disable run-time checks. */
55 #if defined(RTE_LIBRTE_LPM_DEBUG)
56 #include <rte_debug.h>
57 #define VERIFY_DEPTH(depth) do {                                \
58         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
59                 rte_panic("LPM: Invalid depth (%u) at line %d", \
60                                 (unsigned)(depth), __LINE__);   \
61 } while (0)
62 #else
63 #define VERIFY_DEPTH(depth)
64 #endif
65
66 /*
67  * Converts a given depth value to its corresponding mask value.
68  *
69  * depth  (IN)          : range = 1 - 32
70  * mask   (OUT)         : 32bit mask
71  */
72 static uint32_t __attribute__((pure))
73 depth_to_mask(uint8_t depth)
74 {
75         VERIFY_DEPTH(depth);
76
77         /* To calculate a mask start with a 1 on the left hand side and right
78          * shift while populating the left hand side with 1's
79          */
80         return (int)0x80000000 >> (depth - 1);
81 }
82
83 /*
84  * Converts given depth value to its corresponding range value.
85  */
86 static uint32_t __attribute__((pure))
87 depth_to_range(uint8_t depth)
88 {
89         VERIFY_DEPTH(depth);
90
91         /*
92          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
93          */
94         if (depth <= MAX_DEPTH_TBL24)
95                 return 1 << (MAX_DEPTH_TBL24 - depth);
96
97         /* Else if depth is greater than 24 */
98         return 1 << (RTE_LPM_MAX_DEPTH - depth);
99 }
100
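/*
 * Illustrative sketch (not compiled into this file): a few expected values
 * from the two helpers above, assuming 32-bit IPv4 prefixes.
 *
 *      assert(depth_to_mask(16) == UINT32_C(0xFFFF0000));
 *      assert(depth_to_mask(24) == UINT32_C(0xFFFFFF00));
 *      assert(depth_to_range(16) == 256);      // 256 tbl24 entries covered
 *      assert(depth_to_range(24) == 1);        // exactly one tbl24 entry
 *      assert(depth_to_range(28) == 16);       // 16 tbl8 entries in one group
 */
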
101 /*
102  * Find an existing lpm table and return a pointer to it.
103  */
104 struct rte_lpm *
105 rte_lpm_find_existing(const char *name)
106 {
107         struct rte_lpm *l = NULL;
108         struct rte_tailq_entry *te;
109         struct rte_lpm_list *lpm_list;
110
111         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
112
113         rte_mcfg_tailq_read_lock();
114         TAILQ_FOREACH(te, lpm_list, next) {
115                 l = te->data;
116                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
117                         break;
118         }
119         rte_mcfg_tailq_read_unlock();
120
121         if (te == NULL) {
122                 rte_errno = ENOENT;
123                 return NULL;
124         }
125
126         return l;
127 }
128
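/*
 * Illustrative usage sketch (not part of this file): looking an LPM table up
 * by name, e.g. from a secondary process. handle_missing_table() is a
 * hypothetical application callback.
 *
 *      struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *
 *      if (lpm == NULL && rte_errno == ENOENT)
 *              handle_missing_table();   // no table of that name exists yet
 */
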
129 /*
130  * Allocates memory for LPM object
131  */
132 struct rte_lpm *
133 rte_lpm_create(const char *name, int socket_id,
134                 const struct rte_lpm_config *config)
135 {
136         char mem_name[RTE_LPM_NAMESIZE];
137         struct __rte_lpm *internal_lpm;
138         struct rte_lpm *lpm = NULL;
139         struct rte_tailq_entry *te;
140         uint32_t mem_size, rules_size, tbl8s_size;
141         struct rte_lpm_list *lpm_list;
142
143         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
144
145         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
146
147         /* Check user arguments. */
148         if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
149                         || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
150                 rte_errno = EINVAL;
151                 return NULL;
152         }
153
154         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
155
156         rte_mcfg_tailq_write_lock();
157
158         /* Guarantee there is no existing entry with the same name. */
159         TAILQ_FOREACH(te, lpm_list, next) {
160                 lpm = te->data;
161                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
162                         break;
163         }
164
165         if (te != NULL) {
166                 lpm = NULL;
167                 rte_errno = EEXIST;
168                 goto exit;
169         }
170
171         /* Determine the amount of memory to allocate. */
172         mem_size = sizeof(*internal_lpm);
173         rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
174         tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
175                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
176
177         /* allocate tailq entry */
178         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
179         if (te == NULL) {
180                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
181                 rte_errno = ENOMEM;
182                 goto exit;
183         }
184
185         /* Allocate memory to store the LPM data structures. */
186         internal_lpm = rte_zmalloc_socket(mem_name, mem_size,
187                         RTE_CACHE_LINE_SIZE, socket_id);
188         if (internal_lpm == NULL) {
189                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
190                 rte_free(te);
191                 rte_errno = ENOMEM;
192                 goto exit;
193         }
194
195         lpm = &internal_lpm->lpm;
196         lpm->rules_tbl = rte_zmalloc_socket(NULL,
197                         (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
198
199         if (lpm->rules_tbl == NULL) {
200                 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
201                 rte_free(internal_lpm);
202                 internal_lpm = NULL;
203                 lpm = NULL;
204                 rte_free(te);
205                 rte_errno = ENOMEM;
206                 goto exit;
207         }
208
209         lpm->tbl8 = rte_zmalloc_socket(NULL,
210                         (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
211
212         if (lpm->tbl8 == NULL) {
213                 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
214                 rte_free(lpm->rules_tbl);
215                 rte_free(internal_lpm);
216                 internal_lpm = NULL;
217                 lpm = NULL;
218                 rte_free(te);
219                 rte_errno = ENOMEM;
220                 goto exit;
221         }
222
223         /* Save user arguments. */
224         lpm->max_rules = config->max_rules;
225         lpm->number_tbl8s = config->number_tbl8s;
226         strlcpy(lpm->name, name, sizeof(lpm->name));
227
228         te->data = lpm;
229
230         TAILQ_INSERT_TAIL(lpm_list, te, next);
231
232 exit:
233         rte_mcfg_tailq_write_unlock();
234
235         return lpm;
236 }
237
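/*
 * Illustrative usage sketch (not part of this file): creating and releasing
 * an LPM table with the API above. The table name, sizing values and error
 * handling are arbitrary examples.
 *
 *      #include <rte_lpm.h>
 *
 *      struct rte_lpm_config config = {
 *              .max_rules = 1024,
 *              .number_tbl8s = 256,
 *              .flags = 0,
 *      };
 *      struct rte_lpm *lpm;
 *
 *      lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
 *      if (lpm == NULL)
 *              rte_exit(EXIT_FAILURE, "LPM creation failed: %d\n", rte_errno);
 *
 *      // ... add, look up and delete routes ...
 *
 *      rte_lpm_free(lpm);
 */
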
238 /*
239  * Deallocates memory for given LPM table.
240  */
241 void
242 rte_lpm_free(struct rte_lpm *lpm)
243 {
244         struct __rte_lpm *internal_lpm;
245         struct rte_lpm_list *lpm_list;
246         struct rte_tailq_entry *te;
247
248         /* Check user arguments. */
249         if (lpm == NULL)
250                 return;
251
252         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
253
254         rte_mcfg_tailq_write_lock();
255
256         /* find our tailq entry */
257         TAILQ_FOREACH(te, lpm_list, next) {
258                 if (te->data == (void *) lpm)
259                         break;
260         }
261         if (te != NULL)
262                 TAILQ_REMOVE(lpm_list, te, next);
263
264         rte_mcfg_tailq_write_unlock();
265
266         internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
267         if (internal_lpm->dq != NULL)
268                 rte_rcu_qsbr_dq_delete(internal_lpm->dq);
269         rte_free(lpm->tbl8);
270         rte_free(lpm->rules_tbl);
271         rte_free(lpm);
272         rte_free(te);
273 }
274
275 static void
276 __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
277 {
278         struct rte_lpm_tbl_entry *tbl8 = ((struct rte_lpm *)p)->tbl8;
279         struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
280         uint32_t tbl8_group_index = *(uint32_t *)data;
281
282         RTE_SET_USED(n);
283         /* Set tbl8 group invalid */
284         __atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
285                 __ATOMIC_RELAXED);
286 }
287
288 /* Associate QSBR variable with an LPM object.
289  */
290 int
291 rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
292 {
293         struct rte_rcu_qsbr_dq_parameters params = {0};
294         char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
295         struct __rte_lpm *internal_lpm;
296
297         if (lpm == NULL || cfg == NULL) {
298                 rte_errno = EINVAL;
299                 return 1;
300         }
301
302         internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
303         if (internal_lpm->v != NULL) {
304                 rte_errno = EEXIST;
305                 return 1;
306         }
307
308         if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
309                 /* Nothing else to do. */
310         } else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
311                 /* Init QSBR defer queue. */
312                 snprintf(rcu_dq_name, sizeof(rcu_dq_name),
313                                 "LPM_RCU_%s", lpm->name);
314                 params.name = rcu_dq_name;
315                 params.size = cfg->dq_size;
316                 if (params.size == 0)
317                         params.size = lpm->number_tbl8s;
318                 params.trigger_reclaim_limit = cfg->reclaim_thd;
319                 params.max_reclaim_size = cfg->reclaim_max;
320                 if (params.max_reclaim_size == 0)
321                         params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
322                 params.esize = sizeof(uint32_t);        /* tbl8 group index */
323                 params.free_fn = __lpm_rcu_qsbr_free_resource;
324                 params.p = lpm;
325                 params.v = cfg->v;
326                 internal_lpm->dq = rte_rcu_qsbr_dq_create(&params);
327                 if (internal_lpm->dq == NULL) {
328                         RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
329                         return 1;
330                 }
331         } else {
332                 rte_errno = EINVAL;
333                 return 1;
334         }
335         internal_lpm->rcu_mode = cfg->mode;
336         internal_lpm->v = cfg->v;
337
338         return 0;
339 }
340
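/*
 * Illustrative control-plane sketch (not part of this file): attaching a
 * QSBR variable to an LPM table in deferred-queue mode. The thread count and
 * queue sizing are arbitrary examples.
 *
 *      #include <rte_rcu_qsbr.h>
 *
 *      struct rte_lpm_rcu_config rcu_cfg = {0};
 *      size_t sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
 *      struct rte_rcu_qsbr *qsv;
 *
 *      qsv = rte_zmalloc("LPM_RCU", sz, RTE_CACHE_LINE_SIZE);
 *      if (qsv == NULL || rte_rcu_qsbr_init(qsv, RTE_MAX_LCORE) != 0)
 *              rte_exit(EXIT_FAILURE, "QSBR init failed\n");
 *
 *      rcu_cfg.v = qsv;
 *      rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
 *      rcu_cfg.dq_size = 0;       // 0: default to lpm->number_tbl8s
 *      rcu_cfg.reclaim_max = 0;   // 0: default to RTE_LPM_RCU_DQ_RECLAIM_MAX
 *
 *      if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
 *              rte_exit(EXIT_FAILURE, "RCU attach failed: %d\n", rte_errno);
 */
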
341 /*
342  * Adds a rule to the rule table.
343  *
344  * NOTE: The rule table is split into 32 groups. Each group contains rules that
345  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
346  * prefixes with a depth of 1, etc.). In the code below, (depth - 1) is used
347  * as the group index because, although the depth range is 1 - 32, groups
348  * are stored in the rule table from index 0 - 31.
349  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
350  */
351 static int32_t
352 rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
353         uint32_t next_hop)
354 {
355         uint32_t rule_gindex, rule_index, last_rule;
356         int i;
357
358         VERIFY_DEPTH(depth);
359
360         /* Scan through rule group to see if rule already exists. */
361         if (lpm->rule_info[depth - 1].used_rules > 0) {
362
363                 /* rule_gindex stands for rule group index. */
364                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
365                 /* Initialise rule_index to point to start of rule group. */
366                 rule_index = rule_gindex;
367                 /* Last rule = Last used rule in this rule group. */
368                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
369
370                 for (; rule_index < last_rule; rule_index++) {
371
372                         /* If rule already exists update next hop and return. */
373                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
374
375                                 if (lpm->rules_tbl[rule_index].next_hop
376                                                 == next_hop)
377                                         return -EEXIST;
378                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
379
380                                 return rule_index;
381                         }
382                 }
383
384                 if (rule_index == lpm->max_rules)
385                         return -ENOSPC;
386         } else {
387                 /* Calculate the position in which the rule will be stored. */
388                 rule_index = 0;
389
390                 for (i = depth - 1; i > 0; i--) {
391                         if (lpm->rule_info[i - 1].used_rules > 0) {
392                                 rule_index = lpm->rule_info[i - 1].first_rule
393                                                 + lpm->rule_info[i - 1].used_rules;
394                                 break;
395                         }
396                 }
397                 if (rule_index == lpm->max_rules)
398                         return -ENOSPC;
399
400                 lpm->rule_info[depth - 1].first_rule = rule_index;
401         }
402
403         /* Make room for the new rule in the array. */
404         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
405                 if (lpm->rule_info[i - 1].first_rule
406                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
407                         return -ENOSPC;
408
409                 if (lpm->rule_info[i - 1].used_rules > 0) {
410                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
411                                 + lpm->rule_info[i - 1].used_rules]
412                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
413                         lpm->rule_info[i - 1].first_rule++;
414                 }
415         }
416
417         /* Add the new rule. */
418         lpm->rules_tbl[rule_index].ip = ip_masked;
419         lpm->rules_tbl[rule_index].next_hop = next_hop;
420
421         /* Increment the used rules counter for this rule group. */
422         lpm->rule_info[depth - 1].used_rules++;
423
424         return rule_index;
425 }
426
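/*
 * Illustrative layout sketch (not code): with two depth-8 rules and one
 * depth-16 rule installed, the grouped rule table described above looks like
 * this; each depth group stays contiguous, which is why rules are shifted
 * right when a new group needs room.
 *
 *      rule_info[7]  = { .first_rule = 0, .used_rules = 2 }   // depth 8
 *      rule_info[15] = { .first_rule = 2, .used_rules = 1 }   // depth 16
 *      rules_tbl     = [ /8 rule, /8 rule, /16 rule, ... ]
 */
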
427 /*
428  * Delete a rule from the rule table.
429  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
430  */
431 static void
432 rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
433 {
434         int i;
435
436         VERIFY_DEPTH(depth);
437
438         lpm->rules_tbl[rule_index] =
439                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
440                         + lpm->rule_info[depth - 1].used_rules - 1];
441
442         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
443                 if (lpm->rule_info[i].used_rules > 0) {
444                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
445                                         lpm->rules_tbl[lpm->rule_info[i].first_rule
446                                                 + lpm->rule_info[i].used_rules - 1];
447                         lpm->rule_info[i].first_rule--;
448                 }
449         }
450
451         lpm->rule_info[depth - 1].used_rules--;
452 }
453
454 /*
455  * Finds a rule in rule table.
456  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
457  */
458 static int32_t
459 rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
460 {
461         uint32_t rule_gindex, last_rule, rule_index;
462
463         VERIFY_DEPTH(depth);
464
465         rule_gindex = lpm->rule_info[depth - 1].first_rule;
466         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
467
468         /* Scan used rules at given depth to find rule. */
469         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
470                 /* If rule is found return the rule index. */
471                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
472                         return rule_index;
473         }
474
475         /* If rule is not found return -EINVAL. */
476         return -EINVAL;
477 }
478
479 /*
480  * Find, clean and allocate a tbl8.
481  */
482 static int32_t
483 _tbl8_alloc(struct rte_lpm *lpm)
484 {
485         uint32_t group_idx; /* tbl8 group index. */
486         struct rte_lpm_tbl_entry *tbl8_entry;
487
488         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
489         for (group_idx = 0; group_idx < lpm->number_tbl8s; group_idx++) {
490                 tbl8_entry = &lpm->tbl8[group_idx *
491                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
492                 /* If a free tbl8 group is found, clean it and mark it VALID. */
493                 if (!tbl8_entry->valid_group) {
494                         struct rte_lpm_tbl_entry new_tbl8_entry = {
495                                 .next_hop = 0,
496                                 .valid = INVALID,
497                                 .depth = 0,
498                                 .valid_group = VALID,
499                         };
500
501                         memset(&tbl8_entry[0], 0,
502                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
503                                         sizeof(tbl8_entry[0]));
504
505                         __atomic_store(tbl8_entry, &new_tbl8_entry,
506                                         __ATOMIC_RELAXED);
507
508                         /* Return group index for allocated tbl8 group. */
509                         return group_idx;
510                 }
511         }
512
513         /* If there are no tbl8 groups free then return error. */
514         return -ENOSPC;
515 }
516
517 static int32_t
518 tbl8_alloc(struct rte_lpm *lpm)
519 {
520         int32_t group_idx; /* tbl8 group index. */
521         struct __rte_lpm *internal_lpm;
522
523         internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
524         group_idx = _tbl8_alloc(lpm);
525         if (group_idx == -ENOSPC && internal_lpm->dq != NULL) {
526                 /* If there are no tbl8 groups try to reclaim one. */
527                 if (rte_rcu_qsbr_dq_reclaim(internal_lpm->dq, 1,
528                                 NULL, NULL, NULL) == 0)
529                         group_idx = _tbl8_alloc(lpm);
530         }
531
532         return group_idx;
533 }
534
535 static int32_t
536 tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
537 {
538         struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
539         struct __rte_lpm *internal_lpm;
540         int status;
541
542         internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
543         if (internal_lpm->v == NULL) {
544                 /* Set tbl8 group invalid. */
545                 __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
546                                 __ATOMIC_RELAXED);
547         } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
548                 /* Wait for quiescent state change. */
549                 rte_rcu_qsbr_synchronize(internal_lpm->v,
550                         RTE_QSBR_THRID_INVALID);
551                 /* Set tbl8 group invalid. */
552                 __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
553                                 __ATOMIC_RELAXED);
554         } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
555                 /* Push into QSBR defer queue. */
556                 status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
557                                 (void *)&tbl8_group_start);
558                 if (status == 1) {
559                         RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
560                         return -rte_errno;
561                 }
562         }
563
564         return 0;
565 }
566
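/*
 * Illustrative reader-side sketch (not part of this file): when a QSBR
 * variable is attached, each lookup thread has to report quiescent states,
 * otherwise the synchronize/defer-queue paths above can never reclaim a
 * freed tbl8 group. The thread id handling and forward_packet() helper are
 * arbitrary examples.
 *
 *      unsigned int tid = rte_lcore_id();
 *
 *      rte_rcu_qsbr_thread_register(qsv, tid);
 *      rte_rcu_qsbr_thread_online(qsv, tid);
 *
 *      while (keep_running) {
 *              if (rte_lpm_lookup(lpm, ip, &next_hop) == 0)
 *                      forward_packet(next_hop);   // hypothetical helper
 *              rte_rcu_qsbr_quiescent(qsv, tid);   // outside read-side section
 *      }
 *
 *      rte_rcu_qsbr_thread_offline(qsv, tid);
 *      rte_rcu_qsbr_thread_unregister(qsv, tid);
 */
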
567 static __rte_noinline int32_t
568 add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
569                 uint32_t next_hop)
570 {
571 #define group_idx next_hop
572         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
573
574         /* Calculate the index into Table24. */
575         tbl24_index = ip >> 8;
576         tbl24_range = depth_to_range(depth);
577
578         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
579                 /*
580                  * For invalid entries, or valid but non-extended tbl24
581                  * entries, set the entry directly.
582                  */
583                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
584                                 lpm->tbl24[i].depth <= depth)) {
585
586                         struct rte_lpm_tbl_entry new_tbl24_entry = {
587                                 .next_hop = next_hop,
588                                 .valid = VALID,
589                                 .valid_group = 0,
590                                 .depth = depth,
591                         };
592
593                         /* Setting tbl24 entry in one go to avoid race
594                          * conditions
595                          */
596                         __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
597                                         __ATOMIC_RELEASE);
598
599                         continue;
600                 }
601
602                 if (lpm->tbl24[i].valid_group == 1) {
603                         /* If tbl24 entry is valid and extended calculate the
604                          *  index into tbl8.
605                          */
606                         tbl8_index = lpm->tbl24[i].group_idx *
607                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
608                         tbl8_group_end = tbl8_index +
609                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
610
611                         for (j = tbl8_index; j < tbl8_group_end; j++) {
612                                 if (!lpm->tbl8[j].valid ||
613                                                 lpm->tbl8[j].depth <= depth) {
614                                         struct rte_lpm_tbl_entry
615                                                 new_tbl8_entry = {
616                                                 .valid = VALID,
617                                                 .valid_group = VALID,
618                                                 .depth = depth,
619                                                 .next_hop = next_hop,
620                                         };
621
622                                         /*
623                                          * Setting tbl8 entry in one go to avoid
624                                          * race conditions
625                                          */
626                                         __atomic_store(&lpm->tbl8[j],
627                                                 &new_tbl8_entry,
628                                                 __ATOMIC_RELAXED);
629
630                                         continue;
631                                 }
632                         }
633                 }
634         }
635 #undef group_idx
636         return 0;
637 }
638
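/*
 * Illustrative worked example (not code) for the loop above: adding
 * 10.0.0.0/8 (0x0A000000) covers a contiguous block of tbl24 entries.
 *
 *      tbl24_index = 0x0A000000 >> 8   = 0x0A0000
 *      tbl24_range = 1 << (24 - 8)     = 65536
 *
 * so tbl24 entries 0x0A0000 .. 0x0AFFFF receive the /8 next hop, except
 * those already holding a longer prefix or extended into a tbl8 group,
 * whose tbl8 entries are updated instead.
 */
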
639 static __rte_noinline int32_t
640 add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
641                 uint32_t next_hop)
642 {
643 #define group_idx next_hop
644         uint32_t tbl24_index;
645         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
646                 tbl8_range, i;
647
648         tbl24_index = (ip_masked >> 8);
649         tbl8_range = depth_to_range(depth);
650
651         if (!lpm->tbl24[tbl24_index].valid) {
652                 /* Search for a free tbl8 group. */
653                 tbl8_group_index = tbl8_alloc(lpm);
654
655                 /* Check tbl8 allocation was successful. */
656                 if (tbl8_group_index < 0) {
657                         return tbl8_group_index;
658                 }
659
660                 /* Find index into tbl8 and range. */
661                 tbl8_index = (tbl8_group_index *
662                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
663                                 (ip_masked & 0xFF);
664
665                 /* Set tbl8 entry. */
666                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
667                         struct rte_lpm_tbl_entry new_tbl8_entry = {
668                                 .valid = VALID,
669                                 .depth = depth,
670                                 .valid_group = lpm->tbl8[i].valid_group,
671                                 .next_hop = next_hop,
672                         };
673                         __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
674                                         __ATOMIC_RELAXED);
675                 }
676
677                 /*
678                  * Update tbl24 entry to point to new tbl8 entry. Note: The
679                  * ext_flag and tbl8_index need to be updated simultaneously,
680                  * so assign whole structure in one go
681                  */
682
683                 struct rte_lpm_tbl_entry new_tbl24_entry = {
684                         .group_idx = tbl8_group_index,
685                         .valid = VALID,
686                         .valid_group = 1,
687                         .depth = 0,
688                 };
689
690                 /* The tbl24 entry must be written only after the
691                  * tbl8 entries are written.
692                  */
693                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
694                                 __ATOMIC_RELEASE);
695
696         } /* If the entry is valid but not extended, expand it into a tbl8. */
697         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
698                 /* Search for free tbl8 group. */
699                 tbl8_group_index = tbl8_alloc(lpm);
700
701                 if (tbl8_group_index < 0) {
702                         return tbl8_group_index;
703                 }
704
705                 tbl8_group_start = tbl8_group_index *
706                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
707                 tbl8_group_end = tbl8_group_start +
708                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
709
710                 /* Populate new tbl8 with tbl24 value. */
711                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
712                         struct rte_lpm_tbl_entry new_tbl8_entry = {
713                                 .valid = VALID,
714                                 .depth = lpm->tbl24[tbl24_index].depth,
715                                 .valid_group = lpm->tbl8[i].valid_group,
716                                 .next_hop = lpm->tbl24[tbl24_index].next_hop,
717                         };
718                         __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
719                                         __ATOMIC_RELAXED);
720                 }
721
722                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
723
724                 /* Insert new rule into the tbl8 entry. */
725                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
726                         struct rte_lpm_tbl_entry new_tbl8_entry = {
727                                 .valid = VALID,
728                                 .depth = depth,
729                                 .valid_group = lpm->tbl8[i].valid_group,
730                                 .next_hop = next_hop,
731                         };
732                         __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
733                                         __ATOMIC_RELAXED);
734                 }
735
736                 /*
737                  * Update tbl24 entry to point to new tbl8 entry. Note: The
738                  * ext_flag and tbl8_index need to be updated simultaneously,
739                  * so assign whole structure in one go.
740                  */
741
742                 struct rte_lpm_tbl_entry new_tbl24_entry = {
743                                 .group_idx = tbl8_group_index,
744                                 .valid = VALID,
745                                 .valid_group = 1,
746                                 .depth = 0,
747                 };
748
749                 /* The tbl24 entry must be written only after the
750                  * tbl8 entries are written.
751                  */
752                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
753                                 __ATOMIC_RELEASE);
754
755         } else { /*
756                 * If the entry is valid and extended, index into the existing tbl8 group.
757                 */
758                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
759                 tbl8_group_start = tbl8_group_index *
760                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
761                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
762
763                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
764
765                         if (!lpm->tbl8[i].valid ||
766                                         lpm->tbl8[i].depth <= depth) {
767                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
768                                         .valid = VALID,
769                                         .depth = depth,
770                                         .next_hop = next_hop,
771                                         .valid_group = lpm->tbl8[i].valid_group,
772                                 };
773
774                                 /*
775                                  * Setting tbl8 entry in one go to avoid race
776                                  * condition
777                                  */
778                                 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
779                                                 __ATOMIC_RELAXED);
780
781                                 continue;
782                         }
783                 }
784         }
785 #undef group_idx
786         return 0;
787 }
788
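/*
 * Illustrative worked example (not code) for the index math above: adding
 * 192.168.1.16/28 (0xC0A80110) touches one tbl24 entry and one tbl8 group.
 *
 *      tbl24_index = 0xC0A80110 >> 8   = 0xC0A801
 *      tbl8 offset = 0xC0A80110 & 0xFF = 16
 *      tbl8_range  = 1 << (32 - 28)    = 16
 *
 * so entries 16..31 of the group's 256 tbl8 entries receive the new depth
 * and next hop, and the tbl24 entry is written last, flagged as extended
 * (valid_group = 1) and pointing at that group.
 */
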
789 /*
790  * Add a route
791  */
792 int
793 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
794                 uint32_t next_hop)
795 {
796         int32_t rule_index, status = 0;
797         uint32_t ip_masked;
798
799         /* Check user arguments. */
800         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
801                 return -EINVAL;
802
803         ip_masked = ip & depth_to_mask(depth);
804
805         /* Add the rule to the rule table. */
806         rule_index = rule_add(lpm, ip_masked, depth, next_hop);
807
808         /* Skip table entries update if the rule is the same as
809          * the rule in the rules table.
810          */
811         if (rule_index == -EEXIST)
812                 return 0;
813
814         /* If there is no space available for the new rule return error. */
815         if (rule_index < 0) {
816                 return rule_index;
817         }
818
819         if (depth <= MAX_DEPTH_TBL24) {
820                 status = add_depth_small(lpm, ip_masked, depth, next_hop);
821         } else { /* If depth > MAX_DEPTH_TBL24 */
822                 status = add_depth_big(lpm, ip_masked, depth, next_hop);
823
824                 /*
825                  * If add fails due to exhaustion of tbl8 extensions delete
826                  * rule that was added to rule table.
827                  */
828                 if (status < 0) {
829                         rule_delete(lpm, rule_index, depth);
830
831                         return status;
832                 }
833         }
834
835         return 0;
836 }
837
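/*
 * Illustrative usage sketch (not part of this file): installing routes and
 * resolving them with the longest-prefix-match lookup from rte_lpm.h. The
 * addresses and next-hop ids are arbitrary; forward_to() is a hypothetical
 * application helper.
 *
 *      #include <rte_ip.h>
 *
 *      uint32_t next_hop;
 *
 *      rte_lpm_add(lpm, RTE_IPV4(10, 0, 0, 0), 8, 1);   // 10.0.0.0/8  -> 1
 *      rte_lpm_add(lpm, RTE_IPV4(10, 1, 0, 0), 16, 2);  // 10.1.0.0/16 -> 2
 *
 *      if (rte_lpm_lookup(lpm, RTE_IPV4(10, 1, 2, 3), &next_hop) == 0)
 *              forward_to(next_hop);   // longest match wins: next_hop == 2
 */
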
838 /*
839  * Look for a rule in the high-level rules table
840  */
841 int
842 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
843                 uint32_t *next_hop)
844 {
845         uint32_t ip_masked;
846         int32_t rule_index;
847
848         /* Check user arguments. */
849         if ((lpm == NULL) ||
850                 (next_hop == NULL) ||
851                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
852                 return -EINVAL;
853
854         /* Look for the rule using rule_find. */
855         ip_masked = ip & depth_to_mask(depth);
856         rule_index = rule_find(lpm, ip_masked, depth);
857
858         if (rule_index >= 0) {
859                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
860                 return 1;
861         }
862
863         /* If rule is not found return 0. */
864         return 0;
865 }
866
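/*
 * Illustrative sketch (not part of this file): checking whether an exact
 * prefix (not a longest match) is installed, continuing the add example
 * above.
 *
 *      uint32_t nh;
 *
 *      if (rte_lpm_is_rule_present(lpm, RTE_IPV4(10, 1, 0, 0), 16, &nh) == 1)
 *              printf("10.1.0.0/16 -> next hop %u\n", nh);
 */
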
867 static int32_t
868 find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
869                 uint8_t *sub_rule_depth)
870 {
871         int32_t rule_index;
872         uint32_t ip_masked;
873         uint8_t prev_depth;
874
875         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
876                 ip_masked = ip & depth_to_mask(prev_depth);
877
878                 rule_index = rule_find(lpm, ip_masked, prev_depth);
879
880                 if (rule_index >= 0) {
881                         *sub_rule_depth = prev_depth;
882                         return rule_index;
883                 }
884         }
885
886         return -1;
887 }
888
889 static int32_t
890 delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
891         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
892 {
893 #define group_idx next_hop
894         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
895
896         /* Calculate the range and index into Table24. */
897         tbl24_range = depth_to_range(depth);
898         tbl24_index = (ip_masked >> 8);
899         struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
900
901         /*
902          * Firstly check the sub_rule_index. A -1 indicates no replacement rule
903          * and a positive number indicates a sub_rule_index.
904          */
905         if (sub_rule_index < 0) {
906                 /*
907                  * If no replacement rule exists then invalidate entries
908                  * associated with this rule.
909                  */
910                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
911
912                         if (lpm->tbl24[i].valid_group == 0 &&
913                                         lpm->tbl24[i].depth <= depth) {
914                                 __atomic_store(&lpm->tbl24[i],
915                                         &zero_tbl24_entry, __ATOMIC_RELEASE);
916                         } else if (lpm->tbl24[i].valid_group == 1) {
917                                 /*
918                                  * If TBL24 entry is extended, then there has
919                                  * to be a rule with depth >= 25 in the
920                                  * associated TBL8 group.
921                                  */
922
923                                 tbl8_group_index = lpm->tbl24[i].group_idx;
924                                 tbl8_index = tbl8_group_index *
925                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
926
927                                 for (j = tbl8_index; j < (tbl8_index +
928                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
929
930                                         if (lpm->tbl8[j].depth <= depth)
931                                                 lpm->tbl8[j].valid = INVALID;
932                                 }
933                         }
934                 }
935         } else {
936                 /*
937                  * If a replacement rule exists then modify entries
938                  * associated with this rule.
939                  */
940
941                 struct rte_lpm_tbl_entry new_tbl24_entry = {
942                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
943                         .valid = VALID,
944                         .valid_group = 0,
945                         .depth = sub_rule_depth,
946                 };
947
948                 struct rte_lpm_tbl_entry new_tbl8_entry = {
949                         .valid = VALID,
950                         .valid_group = VALID,
951                         .depth = sub_rule_depth,
952                         .next_hop = lpm->rules_tbl
953                         [sub_rule_index].next_hop,
954                 };
955
956                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
957
958                         if (lpm->tbl24[i].valid_group == 0 &&
959                                         lpm->tbl24[i].depth <= depth) {
960                                 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
961                                                 __ATOMIC_RELEASE);
962                         } else  if (lpm->tbl24[i].valid_group == 1) {
963                                 /*
964                                  * If TBL24 entry is extended, then there has
965                                  * to be a rule with depth >= 25 in the
966                                  * associated TBL8 group.
967                                  */
968
969                                 tbl8_group_index = lpm->tbl24[i].group_idx;
970                                 tbl8_index = tbl8_group_index *
971                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
972
973                                 for (j = tbl8_index; j < (tbl8_index +
974                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
975
976                                         if (lpm->tbl8[j].depth <= depth)
977                                                 __atomic_store(&lpm->tbl8[j],
978                                                         &new_tbl8_entry,
979                                                         __ATOMIC_RELAXED);
980                                 }
981                         }
982                 }
983         }
984 #undef group_idx
985         return 0;
986 }
987
988 /*
989  * Checks if table 8 group can be recycled.
990  *
991  * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
992  * Return of -EINVAL means tbl8 is empty and thus can be recycled.
993  * Return of value > -1 means tbl8 is in use but has all the same values and
994  * thus can be recycled.
995  */
996 static int32_t
997 tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
998                 uint32_t tbl8_group_start)
999 {
1000         uint32_t tbl8_group_end, i;
1001         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1002
1003         /*
1004          * Check the first entry of the given tbl8. If it is invalid we know
1005          * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24
1006          * (as such a rule would affect all entries in the tbl8) and thus the
1007          * group cannot be folded back into a single tbl24 entry.
1008          */
1009         if (tbl8[tbl8_group_start].valid) {
1010                 /*
1011                  * If first entry is valid check if the depth is less than 24
1012                  * and if so check the rest of the entries to verify that they
1013                  * are all of this depth.
1014                  */
1015                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1016                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1017                                         i++) {
1018
1019                                 if (tbl8[i].depth !=
1020                                                 tbl8[tbl8_group_start].depth) {
1021
1022                                         return -EEXIST;
1023                                 }
1024                         }
1025                         /* If all entries are the same return the tbl8 index */
1026                         return tbl8_group_start;
1027                 }
1028
1029                 return -EEXIST;
1030         }
1031         /*
1032          * If the first entry is invalid check if the rest of the entries in
1033          * the tbl8 are invalid.
1034          */
1035         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1036                 if (tbl8[i].valid)
1037                         return -EEXIST;
1038         }
1039         /* If no valid entries are found then return -EINVAL. */
1040         return -EINVAL;
1041 }
1042
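/*
 * Illustrative summary (not code) of the outcomes above for one 256-entry
 * tbl8 group:
 *
 *      - every entry invalid                      -> -EINVAL, group can be freed
 *      - first entry valid with depth <= 24 and
 *        all entries sharing that depth           -> group start index, group can
 *                                                    be folded back into tbl24
 *      - anything else (depth > 24, mixed depths,
 *        partially used group)                    -> -EEXIST, group stays in use
 */
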
1043 static int32_t
1044 delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
1045         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1046 {
1047 #define group_idx next_hop
1048         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1049                         tbl8_range, i;
1050         int32_t tbl8_recycle_index, status = 0;
1051
1052         /*
1053          * Calculate the index into tbl24 and range. Note: All depths larger
1054          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1055          */
1056         tbl24_index = ip_masked >> 8;
1057
1058         /* Calculate the index into tbl8 and range. */
1059         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1060         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1061         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1062         tbl8_range = depth_to_range(depth);
1063
1064         if (sub_rule_index < 0) {
1065                 /*
1066                  * Loop through the range of entries on tbl8 for which the
1067                  * rule_to_delete must be removed or modified.
1068                  */
1069                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1070                         if (lpm->tbl8[i].depth <= depth)
1071                                 lpm->tbl8[i].valid = INVALID;
1072                 }
1073         } else {
1074                 /* Set new tbl8 entry. */
1075                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1076                         .valid = VALID,
1077                         .depth = sub_rule_depth,
1078                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1079                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1080                 };
1081
1082                 /*
1083                  * Loop through the range of entries on tbl8 for which the
1084                  * rule_to_delete must be modified.
1085                  */
1086                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1087                         if (lpm->tbl8[i].depth <= depth)
1088                                 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1089                                                 __ATOMIC_RELAXED);
1090                 }
1091         }
1092
1093         /*
1094          * Check if there are any valid entries in this tbl8 group. If all
1095          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1096          * associated tbl24 entry.
1097          */
1098
1099         tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
1100
1101         if (tbl8_recycle_index == -EINVAL) {
1102                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1103                  * Prevent the free of the tbl8 group from hoisting.
1104                  */
1105                 lpm->tbl24[tbl24_index].valid = 0;
1106                 __atomic_thread_fence(__ATOMIC_RELEASE);
1107                 status = tbl8_free(lpm, tbl8_group_start);
1108         } else if (tbl8_recycle_index > -1) {
1109                 /* Update tbl24 entry. */
1110                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1111                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1112                         .valid = VALID,
1113                         .valid_group = 0,
1114                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1115                 };
1116
1117                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1118                  * Prevent the free of the tbl8 group from hoisting.
1119                  */
1120                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1121                                 __ATOMIC_RELAXED);
1122                 __atomic_thread_fence(__ATOMIC_RELEASE);
1123                 status = tbl8_free(lpm, tbl8_group_start);
1124         }
1125 #undef group_idx
1126         return status;
1127 }
1128
1129 /*
1130  * Deletes a rule
1131  */
1132 int
1133 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1134 {
1135         int32_t rule_to_delete_index, sub_rule_index;
1136         uint32_t ip_masked;
1137         uint8_t sub_rule_depth;
1138         /*
1139          * Check input arguments. Note: ip is an unsigned 32-bit integer,
1140          * therefore it need not be checked.
1141          */
1142         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1143                 return -EINVAL;
1144         }
1145
1146         ip_masked = ip & depth_to_mask(depth);
1147
1148         /*
1149          * Find the index of the input rule, that needs to be deleted, in the
1150          * rule table.
1151          */
1152         rule_to_delete_index = rule_find(lpm, ip_masked, depth);
1153
1154         /*
1155          * Check if rule_to_delete_index was found. If no rule was found the
1156          * function rule_find returns -EINVAL.
1157          */
1158         if (rule_to_delete_index < 0)
1159                 return -EINVAL;
1160
1161         /* Delete the rule from the rule table. */
1162         rule_delete(lpm, rule_to_delete_index, depth);
1163
1164         /*
1165          * Find rule to replace the rule_to_delete. If there is no rule to
1166          * replace the rule_to_delete we return -1 and invalidate the table
1167          * entries associated with this rule.
1168          */
1169         sub_rule_depth = 0;
1170         sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
1171
1172         /*
1173          * If the input depth value is less than or equal to MAX_DEPTH_TBL24
1174          * use delete_depth_small, otherwise use delete_depth_big.
1175          */
1176         if (depth <= MAX_DEPTH_TBL24) {
1177                 return delete_depth_small(lpm, ip_masked, depth,
1178                                 sub_rule_index, sub_rule_depth);
1179         } else { /* If depth > MAX_DEPTH_TBL24 */
1180                 return delete_depth_big(lpm, ip_masked, depth, sub_rule_index,
1181                                 sub_rule_depth);
1182         }
1183 }
1184
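/*
 * Illustrative usage sketch (not part of this file): removing a single
 * prefix, mirroring the add example earlier, and wiping the whole table.
 *
 *      if (rte_lpm_delete(lpm, RTE_IPV4(10, 1, 0, 0), 16) != 0)
 *              printf("prefix was not present\n");
 *
 *      rte_lpm_delete_all(lpm);        // drop every rule and table entry
 */
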
1185 /*
1186  * Delete all rules from the LPM table.
1187  */
1188 void
1189 rte_lpm_delete_all(struct rte_lpm *lpm)
1190 {
1191         /* Zero rule information. */
1192         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1193
1194         /* Zero tbl24. */
1195         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1196
1197         /* Zero tbl8. */
1198         memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1199                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1200
1201         /* Delete all rules from the rules table. */
1202         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1203 }