[dpdk.git] / lib / librte_lpm / rte_lpm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <stdint.h>
7 #include <errno.h>
8 #include <stdarg.h>
9 #include <stdio.h>
10 #include <sys/queue.h>
11
12 #include <rte_log.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_common.h>
15 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_malloc.h>
17 #include <rte_eal.h>
18 #include <rte_eal_memconfig.h>
19 #include <rte_per_lcore.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_rwlock.h>
23 #include <rte_spinlock.h>
24 #include <rte_tailq.h>
25
26 #include "rte_lpm.h"
27
28 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
29
30 static struct rte_tailq_elem rte_lpm_tailq = {
31         .name = "RTE_LPM",
32 };
33 EAL_REGISTER_TAILQ(rte_lpm_tailq)
34
35 #define MAX_DEPTH_TBL24 24
36
37 enum valid_flag {
38         INVALID = 0,
39         VALID
40 };
41
42 /* Macro to enable/disable run-time checks. */
43 #if defined(RTE_LIBRTE_LPM_DEBUG)
44 #include <rte_debug.h>
45 #define VERIFY_DEPTH(depth) do {                                \
46         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
47                 rte_panic("LPM: Invalid depth (%u) at line %d", \
48                                 (unsigned)(depth), __LINE__);   \
49 } while (0)
50 #else
51 #define VERIFY_DEPTH(depth)
52 #endif
53
54 /*
55  * Converts a given depth value to its corresponding mask value.
56  *
57  * depth  (IN)          : range = 1 - 32
58  * mask   (OUT)         : 32bit mask
59  */
60 static uint32_t __attribute__((pure))
61 depth_to_mask(uint8_t depth)
62 {
63         VERIFY_DEPTH(depth);
64
65         /* To calculate a mask start with a 1 on the left hand side and right
66          * shift while populating the left hand side with 1's
67          */
68         return (int)0x80000000 >> (depth - 1);
69 }
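
/*
 * Worked example (illustrative, not part of the original source): because the
 * shift above is an arithmetic right shift, the vacated high bits are filled
 * with 1's, so
 *
 *      depth_to_mask(1)  == 0x80000000
 *      depth_to_mask(24) == 0xFFFFFF00
 *      depth_to_mask(32) == 0xFFFFFFFF
 */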
70
71 /*
72  * Converts given depth value to its corresponding range value.
73  */
74 static uint32_t __attribute__((pure))
75 depth_to_range(uint8_t depth)
76 {
77         VERIFY_DEPTH(depth);
78
79         /*
80          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
81          */
82         if (depth <= MAX_DEPTH_TBL24)
83                 return 1 << (MAX_DEPTH_TBL24 - depth);
84
85         /* Else if depth is greater than 24 */
86         return 1 << (RTE_LPM_MAX_DEPTH - depth);
87 }
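
/*
 * Worked example (illustrative, not part of the original source): a rule of a
 * given depth covers 2^(24 - depth) tbl24 entries when depth <= 24, and
 * 2^(32 - depth) tbl8 entries otherwise, e.g.
 *
 *      depth_to_range(16) == 256   tbl24 entries
 *      depth_to_range(24) == 1     tbl24 entry
 *      depth_to_range(25) == 128   tbl8 entries
 *      depth_to_range(32) == 1     tbl8 entry
 */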
88
89 /*
90  * Find an existing lpm table and return a pointer to it.
91  */
92 struct rte_lpm *
93 rte_lpm_find_existing(const char *name)
94 {
95         struct rte_lpm *l = NULL;
96         struct rte_tailq_entry *te;
97         struct rte_lpm_list *lpm_list;
98
99         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
100
101         rte_mcfg_tailq_read_lock();
102         TAILQ_FOREACH(te, lpm_list, next) {
103                 l = te->data;
104                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
105                         break;
106         }
107         rte_mcfg_tailq_read_unlock();
108
109         if (te == NULL) {
110                 rte_errno = ENOENT;
111                 return NULL;
112         }
113
114         return l;
115 }
116
117 /*
118  * Allocates memory for LPM object
119  */
120 struct rte_lpm *
121 rte_lpm_create(const char *name, int socket_id,
122                 const struct rte_lpm_config *config)
123 {
124         char mem_name[RTE_LPM_NAMESIZE];
125         struct rte_lpm *lpm = NULL;
126         struct rte_tailq_entry *te;
127         uint32_t mem_size, rules_size, tbl8s_size;
128         struct rte_lpm_list *lpm_list;
129
130         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
131
132         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
133
134         /* Check user arguments. */
135         if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
136                         || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
137                 rte_errno = EINVAL;
138                 return NULL;
139         }
140
141         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
142
143         /* Determine the amount of memory to allocate. */
144         mem_size = sizeof(*lpm);
145         rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
146         tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
147                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
148
149         rte_mcfg_tailq_write_lock();
150
151         /* guarantee there is no existing entry with the same name */
152         TAILQ_FOREACH(te, lpm_list, next) {
153                 lpm = te->data;
154                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
155                         break;
156         }
157
158         if (te != NULL) {
159                 lpm = NULL;
160                 rte_errno = EEXIST;
161                 goto exit;
162         }
163
164         /* allocate tailq entry */
165         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
166         if (te == NULL) {
167                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
168                 rte_errno = ENOMEM;
169                 goto exit;
170         }
171
172         /* Allocate memory to store the LPM data structures. */
173         lpm = rte_zmalloc_socket(mem_name, mem_size,
174                         RTE_CACHE_LINE_SIZE, socket_id);
175         if (lpm == NULL) {
176                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
177                 rte_free(te);
178                 rte_errno = ENOMEM;
179                 goto exit;
180         }
181
182         lpm->rules_tbl = rte_zmalloc_socket(NULL,
183                         (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
184
185         if (lpm->rules_tbl == NULL) {
186                 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
187                 rte_free(lpm);
188                 lpm = NULL;
189                 rte_free(te);
190                 rte_errno = ENOMEM;
191                 goto exit;
192         }
193
194         lpm->tbl8 = rte_zmalloc_socket(NULL,
195                         (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
196
197         if (lpm->tbl8 == NULL) {
198                 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
199                 rte_free(lpm->rules_tbl);
200                 rte_free(lpm);
201                 lpm = NULL;
202                 rte_free(te);
203                 rte_errno = ENOMEM;
204                 goto exit;
205         }
206
207         /* Save user arguments. */
208         lpm->max_rules = config->max_rules;
209         lpm->number_tbl8s = config->number_tbl8s;
210         strlcpy(lpm->name, name, sizeof(lpm->name));
211
212         te->data = lpm;
213
214         TAILQ_INSERT_TAIL(lpm_list, te, next);
215
216 exit:
217         rte_mcfg_tailq_write_unlock();
218
219         return lpm;
220 }
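
/*
 * Illustrative usage sketch (not part of the original source): creating and
 * releasing an LPM table from application code. Only the config fields read
 * above (max_rules, number_tbl8s) are set; the values, the table name and the
 * use of rte_socket_id() are arbitrary examples.
 *
 *      struct rte_lpm_config config = {
 *              .max_rules = 1024,
 *              .number_tbl8s = 256,
 *      };
 *      struct rte_lpm *lpm = rte_lpm_create("example_lpm",
 *                      rte_socket_id(), &config);
 *      if (lpm == NULL)
 *              printf("LPM creation failed: %d\n", rte_errno);
 *      ...
 *      rte_lpm_free(lpm);
 */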
221
222 /*
223  * Deallocates memory for given LPM table.
224  */
225 void
226 rte_lpm_free(struct rte_lpm *lpm)
227 {
228         struct rte_lpm_list *lpm_list;
229         struct rte_tailq_entry *te;
230
231         /* Check user arguments. */
232         if (lpm == NULL)
233                 return;
234
235         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
236
237         rte_mcfg_tailq_write_lock();
238
239         /* find our tailq entry */
240         TAILQ_FOREACH(te, lpm_list, next) {
241                 if (te->data == (void *) lpm)
242                         break;
243         }
244         if (te != NULL)
245                 TAILQ_REMOVE(lpm_list, te, next);
246
247         rte_mcfg_tailq_write_unlock();
248
249         rte_free(lpm->tbl8);
250         rte_free(lpm->rules_tbl);
251         rte_free(lpm);
252         rte_free(te);
253 }
254
255 /*
256  * Adds a rule to the rule table.
257  *
258  * NOTE: The rule table is split into 32 groups. Each group contains rules that
259  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
260  * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
261  * as the index into rule_info because, even though the depth range is 1 - 32,
262  * the groups are indexed from 0 - 31.
263  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
264  */
265 static int32_t
266 rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
267         uint32_t next_hop)
268 {
269         uint32_t rule_gindex, rule_index, last_rule;
270         int i;
271
272         VERIFY_DEPTH(depth);
273
274         /* Scan through rule group to see if rule already exists. */
275         if (lpm->rule_info[depth - 1].used_rules > 0) {
276
277                 /* rule_gindex stands for rule group index. */
278                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
279                 /* Initialise rule_index to point to start of rule group. */
280                 rule_index = rule_gindex;
281                 /* Last rule = Last used rule in this rule group. */
282                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
283
284                 for (; rule_index < last_rule; rule_index++) {
285
286                         /* If rule already exists update next hop and return. */
287                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
288
289                                 if (lpm->rules_tbl[rule_index].next_hop
290                                                 == next_hop)
291                                         return -EEXIST;
292                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
293
294                                 return rule_index;
295                         }
296                 }
297
298                 if (rule_index == lpm->max_rules)
299                         return -ENOSPC;
300         } else {
301                 /* Calculate the position in which the rule will be stored. */
302                 rule_index = 0;
303
304                 for (i = depth - 1; i > 0; i--) {
305                         if (lpm->rule_info[i - 1].used_rules > 0) {
306                                 rule_index = lpm->rule_info[i - 1].first_rule
307                                                 + lpm->rule_info[i - 1].used_rules;
308                                 break;
309                         }
310                 }
311                 if (rule_index == lpm->max_rules)
312                         return -ENOSPC;
313
314                 lpm->rule_info[depth - 1].first_rule = rule_index;
315         }
316
317         /* Make room for the new rule in the array. */
318         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
319                 if (lpm->rule_info[i - 1].first_rule
320                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
321                         return -ENOSPC;
322
323                 if (lpm->rule_info[i - 1].used_rules > 0) {
324                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
325                                 + lpm->rule_info[i - 1].used_rules]
326                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
327                         lpm->rule_info[i - 1].first_rule++;
328                 }
329         }
330
331         /* Add the new rule. */
332         lpm->rules_tbl[rule_index].ip = ip_masked;
333         lpm->rules_tbl[rule_index].next_hop = next_hop;
334
335         /* Increment the used rules counter for this rule group. */
336         lpm->rule_info[depth - 1].used_rules++;
337
338         return rule_index;
339 }
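
/*
 * Illustrative layout example (not part of the original source): with two /8
 * rules and one /24 rule stored, rule_info[7] = {first_rule = 0, used_rules = 2}
 * and rule_info[23] = {first_rule = 2, used_rules = 1}:
 *
 *      rules_tbl: [ /8 | /8 | /24 ]
 *
 * Adding a third /8 rule first copies the /24 rule from index 2 to index 3 and
 * bumps rule_info[23].first_rule to 3 ("make room" loop above), then writes
 * the new rule at index 2:
 *
 *      rules_tbl: [ /8 | /8 | /8 | /24 ]
 */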
340
341 /*
342  * Delete a rule from the rule table.
343  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
344  */
345 static void
346 rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
347 {
348         int i;
349
350         VERIFY_DEPTH(depth);
351
352         lpm->rules_tbl[rule_index] =
353                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
354                         + lpm->rule_info[depth - 1].used_rules - 1];
355
356         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
357                 if (lpm->rule_info[i].used_rules > 0) {
358                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
359                                         lpm->rules_tbl[lpm->rule_info[i].first_rule
360                                                 + lpm->rule_info[i].used_rules - 1];
361                         lpm->rule_info[i].first_rule--;
362                 }
363         }
364
365         lpm->rule_info[depth - 1].used_rules--;
366 }
367
368 /*
369  * Finds a rule in rule table.
370  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
371  */
372 static int32_t
373 rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
374 {
375         uint32_t rule_gindex, last_rule, rule_index;
376
377         VERIFY_DEPTH(depth);
378
379         rule_gindex = lpm->rule_info[depth - 1].first_rule;
380         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
381
382         /* Scan used rules at given depth to find rule. */
383         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
384                 /* If rule is found return the rule index. */
385                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
386                         return rule_index;
387         }
388
389         /* If rule is not found return -EINVAL. */
390         return -EINVAL;
391 }
392
393 /*
394  * Find, clean and allocate a tbl8.
395  */
396 static int32_t
397 tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
398 {
399         uint32_t group_idx; /* tbl8 group index. */
400         struct rte_lpm_tbl_entry *tbl8_entry;
401
402         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
403         for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
404                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
405                 /* If a free tbl8 group is found clean it and set as VALID. */
406                 if (!tbl8_entry->valid_group) {
407                         struct rte_lpm_tbl_entry new_tbl8_entry = {
408                                 .next_hop = 0,
409                                 .valid = INVALID,
410                                 .depth = 0,
411                                 .valid_group = VALID,
412                         };
413
414                         memset(&tbl8_entry[0], 0,
415                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
416                                         sizeof(tbl8_entry[0]));
417
418                         __atomic_store(tbl8_entry, &new_tbl8_entry,
419                                         __ATOMIC_RELAXED);
420
421                         /* Return group index for allocated tbl8 group. */
422                         return group_idx;
423                 }
424         }
425
426         /* If there are no tbl8 groups free then return error. */
427         return -ENOSPC;
428 }
429
430 static void
431 tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
432 {
433         /* Set tbl8 group invalid. */
434         struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
435
436         __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
437                         __ATOMIC_RELAXED);
438 }
439
440 static __rte_noinline int32_t
441 add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
442                 uint32_t next_hop)
443 {
444 #define group_idx next_hop
445         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
446
447         /* Calculate the index into Table24. */
448         tbl24_index = ip >> 8;
449         tbl24_range = depth_to_range(depth);
450
451         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
452                 /*
453                  * Set the tbl24 entry if it is invalid, or if it is valid,
454                  * non-extended and its depth is not greater than the new depth.
455                  */
456                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
457                                 lpm->tbl24[i].depth <= depth)) {
458
459                         struct rte_lpm_tbl_entry new_tbl24_entry = {
460                                 .next_hop = next_hop,
461                                 .valid = VALID,
462                                 .valid_group = 0,
463                                 .depth = depth,
464                         };
465
466                         /* Setting tbl24 entry in one go to avoid race
467                          * conditions
468                          */
469                         __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
470                                         __ATOMIC_RELEASE);
471
472                         continue;
473                 }
474
475                 if (lpm->tbl24[i].valid_group == 1) {
476                         /* If tbl24 entry is valid and extended calculate the
477                          *  index into tbl8.
478                          */
479                         tbl8_index = lpm->tbl24[i].group_idx *
480                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
481                         tbl8_group_end = tbl8_index +
482                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
483
484                         for (j = tbl8_index; j < tbl8_group_end; j++) {
485                                 if (!lpm->tbl8[j].valid ||
486                                                 lpm->tbl8[j].depth <= depth) {
487                                         struct rte_lpm_tbl_entry
488                                                 new_tbl8_entry = {
489                                                 .valid = VALID,
490                                                 .valid_group = VALID,
491                                                 .depth = depth,
492                                                 .next_hop = next_hop,
493                                         };
494
495                                         /*
496                                          * Setting tbl8 entry in one go to avoid
497                                          * race conditions
498                                          */
499                                         __atomic_store(&lpm->tbl8[j],
500                                                 &new_tbl8_entry,
501                                                 __ATOMIC_RELAXED);
502
503                                         continue;
504                                 }
505                         }
506                 }
507         }
508 #undef group_idx
509         return 0;
510 }
511
512 static __rte_noinline int32_t
513 add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
514                 uint32_t next_hop)
515 {
516 #define group_idx next_hop
517         uint32_t tbl24_index;
518         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
519                 tbl8_range, i;
520
521         tbl24_index = (ip_masked >> 8);
522         tbl8_range = depth_to_range(depth);
523
524         if (!lpm->tbl24[tbl24_index].valid) {
525                 /* Search for a free tbl8 group. */
526                 tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
527
528                 /* Check tbl8 allocation was successful. */
529                 if (tbl8_group_index < 0) {
530                         return tbl8_group_index;
531                 }
532
533                 /* Find index into tbl8 and range. */
534                 tbl8_index = (tbl8_group_index *
535                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
536                                 (ip_masked & 0xFF);
537
538                 /* Set tbl8 entry. */
539                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
540                         struct rte_lpm_tbl_entry new_tbl8_entry = {
541                                 .valid = VALID,
542                                 .depth = depth,
543                                 .valid_group = lpm->tbl8[i].valid_group,
544                                 .next_hop = next_hop,
545                         };
546                         __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
547                                         __ATOMIC_RELAXED);
548                 }
549
550                 /*
551                  * Update tbl24 entry to point to new tbl8 entry. Note: The
552                  * ext_flag and tbl8_index need to be updated simultaneously,
553                  * so assign whole structure in one go
554                  */
555
556                 struct rte_lpm_tbl_entry new_tbl24_entry = {
557                         .group_idx = tbl8_group_index,
558                         .valid = VALID,
559                         .valid_group = 1,
560                         .depth = 0,
561                 };
562
563                 /* The tbl24 entry must be written only after the
564                  * tbl8 entries are written.
565                  */
566                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
567                                 __ATOMIC_RELEASE);
568
569         } /* If valid entry but not extended calculate the index into Table8. */
570         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
571                 /* Search for free tbl8 group. */
572                 tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
573
574                 if (tbl8_group_index < 0) {
575                         return tbl8_group_index;
576                 }
577
578                 tbl8_group_start = tbl8_group_index *
579                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
580                 tbl8_group_end = tbl8_group_start +
581                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
582
583                 /* Populate new tbl8 with tbl24 value. */
584                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
585                         struct rte_lpm_tbl_entry new_tbl8_entry = {
586                                 .valid = VALID,
587                                 .depth = lpm->tbl24[tbl24_index].depth,
588                                 .valid_group = lpm->tbl8[i].valid_group,
589                                 .next_hop = lpm->tbl24[tbl24_index].next_hop,
590                         };
591                         __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
592                                         __ATOMIC_RELAXED);
593                 }
594
595                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
596
597                 /* Insert new rule into the tbl8 entry. */
598                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
599                         struct rte_lpm_tbl_entry new_tbl8_entry = {
600                                 .valid = VALID,
601                                 .depth = depth,
602                                 .valid_group = lpm->tbl8[i].valid_group,
603                                 .next_hop = next_hop,
604                         };
605                         __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
606                                         __ATOMIC_RELAXED);
607                 }
608
609                 /*
610                  * Update tbl24 entry to point to new tbl8 entry. Note: The
611                  * ext_flag and tbl8_index need to be updated simultaneously,
612                  * so assign whole structure in one go.
613                  */
614
615                 struct rte_lpm_tbl_entry new_tbl24_entry = {
616                                 .group_idx = tbl8_group_index,
617                                 .valid = VALID,
618                                 .valid_group = 1,
619                                 .depth = 0,
620                 };
621
622                 /* The tbl24 entry must be written only after the
623                  * tbl8 entries are written.
624                  */
625                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
626                                 __ATOMIC_RELEASE);
627
628         } else { /*
629                 * If it is a valid, extended entry calculate the index into tbl8.
630                 */
631                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
632                 tbl8_group_start = tbl8_group_index *
633                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
634                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
635
636                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
637
638                         if (!lpm->tbl8[i].valid ||
639                                         lpm->tbl8[i].depth <= depth) {
640                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
641                                         .valid = VALID,
642                                         .depth = depth,
643                                         .next_hop = next_hop,
644                                         .valid_group = lpm->tbl8[i].valid_group,
645                                 };
646
647                                 /*
648                                  * Setting tbl8 entry in one go to avoid race
649                                  * condition
650                                  */
651                                 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
652                                                 __ATOMIC_RELAXED);
653
654                                 continue;
655                         }
656                 }
657         }
658 #undef group_idx
659         return 0;
660 }
661
662 /*
663  * Add a route
664  */
665 int
666 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
667                 uint32_t next_hop)
668 {
669         int32_t rule_index, status = 0;
670         uint32_t ip_masked;
671
672         /* Check user arguments. */
673         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
674                 return -EINVAL;
675
676         ip_masked = ip & depth_to_mask(depth);
677
678         /* Add the rule to the rule table. */
679         rule_index = rule_add(lpm, ip_masked, depth, next_hop);
680
681         /* Skip the table entries update if the rule is the same as
682          * the rule already in the rules table.
683          */
684         if (rule_index == -EEXIST)
685                 return 0;
686
687         /* If there is no space available for the new rule return an error. */
688         if (rule_index < 0) {
689                 return rule_index;
690         }
691
692         if (depth <= MAX_DEPTH_TBL24) {
693                 status = add_depth_small(lpm, ip_masked, depth, next_hop);
694         } else { /* If depth > MAX_DEPTH_TBL24 */
695                 status = add_depth_big(lpm, ip_masked, depth, next_hop);
696
697                 /*
698                  * If add fails due to exhaustion of tbl8 extensions delete
699                  * rule that was added to rule table.
700                  */
701                 if (status < 0) {
702                         rule_delete(lpm, rule_index, depth);
703
704                         return status;
705                 }
706         }
707
708         return 0;
709 }
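
/*
 * Illustrative usage sketch (not part of the original source): adding a /24
 * route and resolving an address inside it. RTE_IPV4() and rte_lpm_lookup()
 * are assumed to be available to the caller from rte_ip.h and rte_lpm.h.
 *
 *      uint32_t nh;
 *
 *      if (rte_lpm_add(lpm, RTE_IPV4(192, 168, 1, 0), 24, 5) < 0)
 *              printf("add failed\n");
 *
 *      if (rte_lpm_lookup(lpm, RTE_IPV4(192, 168, 1, 42), &nh) == 0)
 *              printf("longest match next hop: %u\n", nh);   /* nh == 5 */
 */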
710
711 /*
712  * Look for a rule in the high-level rules table
713  */
714 int
715 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
716                 uint32_t *next_hop)
717 {
718         uint32_t ip_masked;
719         int32_t rule_index;
720
721         /* Check user arguments. */
722         if ((lpm == NULL) ||
723                 (next_hop == NULL) ||
724                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
725                 return -EINVAL;
726
727         /* Look for the rule using rule_find. */
728         ip_masked = ip & depth_to_mask(depth);
729         rule_index = rule_find(lpm, ip_masked, depth);
730
731         if (rule_index >= 0) {
732                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
733                 return 1;
734         }
735
736         /* If rule is not found return 0. */
737         return 0;
738 }
739
740 static int32_t
741 find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
742                 uint8_t *sub_rule_depth)
743 {
744         int32_t rule_index;
745         uint32_t ip_masked;
746         uint8_t prev_depth;
747
748         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
749                 ip_masked = ip & depth_to_mask(prev_depth);
750
751                 rule_index = rule_find(lpm, ip_masked, prev_depth);
752
753                 if (rule_index >= 0) {
754                         *sub_rule_depth = prev_depth;
755                         return rule_index;
756                 }
757         }
758
759         return -1;
760 }
761
762 static int32_t
763 delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
764         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
765 {
766 #define group_idx next_hop
767         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
768
769         /* Calculate the range and index into Table24. */
770         tbl24_range = depth_to_range(depth);
771         tbl24_index = (ip_masked >> 8);
772         struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
773
774         /*
775          * Firstly check the sub_rule_index. A -1 indicates no replacement rule
776          * and a positive number indicates a sub_rule_index.
777          */
778         if (sub_rule_index < 0) {
779                 /*
780                  * If no replacement rule exists then invalidate entries
781                  * associated with this rule.
782                  */
783                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
784
785                         if (lpm->tbl24[i].valid_group == 0 &&
786                                         lpm->tbl24[i].depth <= depth) {
787                                 __atomic_store(&lpm->tbl24[i],
788                                         &zero_tbl24_entry, __ATOMIC_RELEASE);
789                         } else if (lpm->tbl24[i].valid_group == 1) {
790                                 /*
791                                  * If TBL24 entry is extended, then there has
792                                  * to be a rule with depth >= 25 in the
793                                  * associated TBL8 group.
794                                  */
795
796                                 tbl8_group_index = lpm->tbl24[i].group_idx;
797                                 tbl8_index = tbl8_group_index *
798                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
799
800                                 for (j = tbl8_index; j < (tbl8_index +
801                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
802
803                                         if (lpm->tbl8[j].depth <= depth)
804                                                 lpm->tbl8[j].valid = INVALID;
805                                 }
806                         }
807                 }
808         } else {
809                 /*
810                  * If a replacement rule exists then modify entries
811                  * associated with this rule.
812                  */
813
814                 struct rte_lpm_tbl_entry new_tbl24_entry = {
815                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
816                         .valid = VALID,
817                         .valid_group = 0,
818                         .depth = sub_rule_depth,
819                 };
820
821                 struct rte_lpm_tbl_entry new_tbl8_entry = {
822                         .valid = VALID,
823                         .valid_group = VALID,
824                         .depth = sub_rule_depth,
825                         .next_hop = lpm->rules_tbl
826                         [sub_rule_index].next_hop,
827                 };
828
829                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
830
831                         if (lpm->tbl24[i].valid_group == 0 &&
832                                         lpm->tbl24[i].depth <= depth) {
833                                 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
834                                                 __ATOMIC_RELEASE);
835                         } else  if (lpm->tbl24[i].valid_group == 1) {
836                                 /*
837                                  * If TBL24 entry is extended, then there has
838                                  * to be a rule with depth >= 25 in the
839                                  * associated TBL8 group.
840                                  */
841
842                                 tbl8_group_index = lpm->tbl24[i].group_idx;
843                                 tbl8_index = tbl8_group_index *
844                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
845
846                                 for (j = tbl8_index; j < (tbl8_index +
847                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
848
849                                         if (lpm->tbl8[j].depth <= depth)
850                                                 __atomic_store(&lpm->tbl8[j],
851                                                         &new_tbl8_entry,
852                                                         __ATOMIC_RELAXED);
853                                 }
854                         }
855                 }
856         }
857 #undef group_idx
858         return 0;
859 }
860
861 /*
862  * Checks if a tbl8 group can be recycled.
863  *
864  * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
865  * Return of -EINVAL means tbl8 is empty and thus can be recycled.
866  * Return of a value >= 0 means tbl8 is in use but all of its entries share
867  * the same depth and next hop, and thus it can be recycled into a tbl24 entry.
868  */
869 static int32_t
870 tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
871                 uint32_t tbl8_group_start)
872 {
873         uint32_t tbl8_group_end, i;
874         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
875
876         /*
877          * Check the first entry of the given tbl8. If it is invalid we know
878          * this tbl8 does not contain any rule with a depth of MAX_DEPTH_TBL24 or
879          * less (such a rule would set every entry in the group) and thus this
880          * table can not be recycled into a single tbl24 entry.
881          */
882         if (tbl8[tbl8_group_start].valid) {
883                 /*
884                  * If the first entry is valid check if its depth is 24 or less
885                  * and if so check the rest of the entries to verify that they
886                  * are all of this depth.
887                  */
888                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
889                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
890                                         i++) {
891
892                                 if (tbl8[i].depth !=
893                                                 tbl8[tbl8_group_start].depth) {
894
895                                         return -EEXIST;
896                                 }
897                         }
898                         /* If all entries are the same return the tbl8 group index. */
899                         return tbl8_group_start;
900                 }
901
902                 return -EEXIST;
903         }
904         /*
905          * If the first entry is invalid check if the rest of the entries in
906          * the tbl8 are invalid.
907          */
908         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
909                 if (tbl8[i].valid)
910                         return -EEXIST;
911         }
912         /* If no valid entries are found then return -EINVAL. */
913         return -EINVAL;
914 }
915
916 static int32_t
917 delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
918         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
919 {
920 #define group_idx next_hop
921         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
922                         tbl8_range, i;
923         int32_t tbl8_recycle_index;
924
925         /*
926          * Calculate the index into tbl24 and range. Note: All depths larger
927          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
928          */
929         tbl24_index = ip_masked >> 8;
930
931         /* Calculate the index into tbl8 and range. */
932         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
933         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
934         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
935         tbl8_range = depth_to_range(depth);
936
937         if (sub_rule_index < 0) {
938                 /*
939                  * Loop through the range of entries on tbl8 for which the
940                  * rule_to_delete must be removed or modified.
941                  */
942                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
943                         if (lpm->tbl8[i].depth <= depth)
944                                 lpm->tbl8[i].valid = INVALID;
945                 }
946         } else {
947                 /* Set new tbl8 entry. */
948                 struct rte_lpm_tbl_entry new_tbl8_entry = {
949                         .valid = VALID,
950                         .depth = sub_rule_depth,
951                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
952                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
953                 };
954
955                 /*
956                  * Loop through the range of entries on tbl8 for which the
957                  * rule_to_delete must be modified.
958                  */
959                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
960                         if (lpm->tbl8[i].depth <= depth)
961                                 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
962                                                 __ATOMIC_RELAXED);
963                 }
964         }
965
966         /*
967          * Check if there are any valid entries in this tbl8 group. If all
968          * tbl8 entries are invalid we can free the tbl8 and invalidate the
969          * associated tbl24 entry.
970          */
971
972         tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
973
974         if (tbl8_recycle_index == -EINVAL) {
975                 /* Set tbl24 before freeing tbl8 to avoid race condition.
976                  * Prevent the free of the tbl8 group from being hoisted.
977                  */
978                 lpm->tbl24[tbl24_index].valid = 0;
979                 __atomic_thread_fence(__ATOMIC_RELEASE);
980                 tbl8_free(lpm->tbl8, tbl8_group_start);
981         } else if (tbl8_recycle_index > -1) {
982                 /* Update tbl24 entry. */
983                 struct rte_lpm_tbl_entry new_tbl24_entry = {
984                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
985                         .valid = VALID,
986                         .valid_group = 0,
987                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
988                 };
989
990                 /* Set tbl24 before freeing tbl8 to avoid race condition.
991                  * Prevent the free of the tbl8 group from being hoisted.
992                  */
993                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
994                                 __ATOMIC_RELAXED);
995                 __atomic_thread_fence(__ATOMIC_RELEASE);
996                 tbl8_free(lpm->tbl8, tbl8_group_start);
997         }
998 #undef group_idx
999         return 0;
1000 }
1001
1002 /*
1003  * Deletes a rule
1004  */
1005 int
1006 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1007 {
1008         int32_t rule_to_delete_index, sub_rule_index;
1009         uint32_t ip_masked;
1010         uint8_t sub_rule_depth;
1011         /*
1012          * Check input arguments. Note: ip is an unsigned 32-bit integer, so
1013          * every possible value is valid and it need not be checked.
1014          */
1015         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1016                 return -EINVAL;
1017         }
1018
1019         ip_masked = ip & depth_to_mask(depth);
1020
1021         /*
1022          * Find the index of the input rule, that needs to be deleted, in the
1023          * rule table.
1024          */
1025         rule_to_delete_index = rule_find(lpm, ip_masked, depth);
1026
1027         /*
1028          * Check if rule_to_delete_index was found. If no rule was found the
1029          * function rule_find returns -EINVAL.
1030          */
1031         if (rule_to_delete_index < 0)
1032                 return -EINVAL;
1033
1034         /* Delete the rule from the rule table. */
1035         rule_delete(lpm, rule_to_delete_index, depth);
1036
1037         /*
1038          * Find a rule to replace the rule_to_delete. If there is no such rule,
1039          * find_previous_rule() returns -1 and the table entries associated with
1040          * the deleted rule are invalidated instead.
1041          */
1042         sub_rule_depth = 0;
1043         sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
1044
1045         /*
1046          * If the input depth value is less than 25 use function
1047          * delete_depth_small otherwise use delete_depth_big.
1048          */
1049         if (depth <= MAX_DEPTH_TBL24) {
1050                 return delete_depth_small(lpm, ip_masked, depth,
1051                                 sub_rule_index, sub_rule_depth);
1052         } else { /* If depth > MAX_DEPTH_TBL24 */
1053                 return delete_depth_big(lpm, ip_masked, depth, sub_rule_index,
1054                                 sub_rule_depth);
1055         }
1056 }
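
/*
 * Illustrative sketch of the fallback behaviour described above (not part of
 * the original source): deleting a /24 rule while a covering /16 rule exists
 * makes find_previous_rule() pick the /16 as the replacement, so the affected
 * table entries fall back to its next hop instead of being invalidated.
 * RTE_IPV4() is assumed to be available to the caller from rte_ip.h.
 *
 *      rte_lpm_add(lpm, RTE_IPV4(10, 1, 0, 0), 16, 1);
 *      rte_lpm_add(lpm, RTE_IPV4(10, 1, 1, 0), 24, 2);
 *      rte_lpm_delete(lpm, RTE_IPV4(10, 1, 1, 0), 24);
 *      ...addresses in 10.1.1.0/24 now resolve to next hop 1 again...
 */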
1057
1058 /*
1059  * Delete all rules from the LPM table.
1060  */
1061 void
1062 rte_lpm_delete_all(struct rte_lpm *lpm)
1063 {
1064         /* Zero rule information. */
1065         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1066
1067         /* Zero tbl24. */
1068         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1069
1070         /* Zero tbl8. */
1071         memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1072                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1073
1074         /* Delete all rules from the rules table. */
1075         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1076 }