[dpdk.git] / lib / lpm / rte_lpm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  * Copyright(c) 2020 Arm Limited
4  */
5
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <stdarg.h>
10 #include <stdio.h>
11 #include <sys/queue.h>
12
13 #include <rte_log.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_common.h>
16 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
17 #include <rte_malloc.h>
18 #include <rte_eal.h>
19 #include <rte_eal_memconfig.h>
20 #include <rte_per_lcore.h>
21 #include <rte_string_fns.h>
22 #include <rte_errno.h>
23 #include <rte_rwlock.h>
24 #include <rte_spinlock.h>
25 #include <rte_tailq.h>
26
27 #include "rte_lpm.h"
28
29 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
30
31 static struct rte_tailq_elem rte_lpm_tailq = {
32         .name = "RTE_LPM",
33 };
34 EAL_REGISTER_TAILQ(rte_lpm_tailq)
35
36 #define MAX_DEPTH_TBL24 24
37
38 enum valid_flag {
39         INVALID = 0,
40         VALID
41 };
42
43 /** @internal Rule structure. */
44 struct rte_lpm_rule {
45         uint32_t ip; /**< Rule IP address. */
46         uint32_t next_hop; /**< Rule next hop. */
47 };
48
49 /** @internal Contains metadata about the rules table. */
50 struct rte_lpm_rule_info {
51         uint32_t used_rules; /**< Used rules so far. */
52         uint32_t first_rule; /**< Indexes the first rule of a given depth. */
53 };
54
55 /** @internal LPM structure. */
56 struct __rte_lpm {
57         /* Exposed LPM data. */
58         struct rte_lpm lpm;
59
60         /* LPM metadata. */
61         char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
62         uint32_t max_rules; /**< Max. balanced rules per lpm. */
63         uint32_t number_tbl8s; /**< Number of tbl8s. */
64         /** Rule info table. */
65         struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH];
66         struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
67
68         /* RCU config. */
69         struct rte_rcu_qsbr *v;         /* RCU QSBR variable. */
70         enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
71         struct rte_rcu_qsbr_dq *dq;     /* RCU QSBR defer queue. */
72 };
73
74 /* Macro to enable/disable run-time checks. */
75 #if defined(RTE_LIBRTE_LPM_DEBUG)
76 #include <rte_debug.h>
77 #define VERIFY_DEPTH(depth) do {                                \
78         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
79                 rte_panic("LPM: Invalid depth (%u) at line %d", \
80                                 (unsigned)(depth), __LINE__);   \
81 } while (0)
82 #else
83 #define VERIFY_DEPTH(depth)
84 #endif
85
86 /*
87  * Converts a given depth value to its corresponding mask value.
88  *
89  * depth  (IN)          : range = 1 - 32
90  * mask   (OUT)         : 32bit mask
91  */
92 static uint32_t __attribute__((pure))
93 depth_to_mask(uint8_t depth)
94 {
95         VERIFY_DEPTH(depth);
96
97         /* To calculate the mask, start with a 1 in the most-significant bit
98          * and arithmetic-shift right so the left-hand side fills with 1's.
99          */
100         return (int)0x80000000 >> (depth - 1);
101 }
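/*
 * Worked example (illustrative): depth_to_mask(24) arithmetic-shifts the
 * signed value 0x80000000 right by 23 places. Sign extension replicates
 * the top bit, yielding 0xFFFFFF00 -- the /24 netmask. This relies on
 * the compiler implementing >> on a negative value as an arithmetic
 * shift, which is implementation-defined in C but holds for the
 * compilers DPDK targets.
 */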
102
103 /*
104  * Converts given depth value to its corresponding range value.
105  */
106 static uint32_t __attribute__((pure))
107 depth_to_range(uint8_t depth)
108 {
109         VERIFY_DEPTH(depth);
110
111         /*
112          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
113          */
114         if (depth <= MAX_DEPTH_TBL24)
115                 return 1 << (MAX_DEPTH_TBL24 - depth);
116
117         /* Else if depth is greater than 24 */
118         return 1 << (RTE_LPM_MAX_DEPTH - depth);
119 }
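/*
 * Worked examples (illustrative): depth_to_range(16) = 1 << 8 = 256
 * tbl24 entries, depth_to_range(24) = 1 (a single tbl24 entry),
 * depth_to_range(25) = 1 << 7 = 128 tbl8 entries, depth_to_range(32)
 * = 1 (a single tbl8 entry).
 */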
120
121 /*
122  * Find an existing lpm table and return a pointer to it.
123  */
124 struct rte_lpm *
125 rte_lpm_find_existing(const char *name)
126 {
127         struct __rte_lpm *i_lpm = NULL;
128         struct rte_tailq_entry *te;
129         struct rte_lpm_list *lpm_list;
130
131         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
132
133         rte_mcfg_tailq_read_lock();
134         TAILQ_FOREACH(te, lpm_list, next) {
135                 i_lpm = te->data;
136                 if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
137                         break;
138         }
139         rte_mcfg_tailq_read_unlock();
140
141         if (te == NULL) {
142                 rte_errno = ENOENT;
143                 return NULL;
144         }
145
146         return &i_lpm->lpm;
147 }
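/*
 * Usage sketch (illustrative; the table name and handle_error() are
 * assumptions for the example, not part of this API):
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *	if (lpm == NULL)
 *		handle_error();	// rte_errno == ENOENT
 */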
148
149 /*
150  * Allocates memory for LPM object
151  */
152 struct rte_lpm *
153 rte_lpm_create(const char *name, int socket_id,
154                 const struct rte_lpm_config *config)
155 {
156         char mem_name[RTE_LPM_NAMESIZE];
157         struct __rte_lpm *i_lpm;
158         struct rte_lpm *lpm = NULL;
159         struct rte_tailq_entry *te;
160         uint32_t mem_size, rules_size, tbl8s_size;
161         struct rte_lpm_list *lpm_list;
162
163         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
164
165         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
166
167         /* Check user arguments. */
168         if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
169                         || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
170                 rte_errno = EINVAL;
171                 return NULL;
172         }
173
174         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
175
176         rte_mcfg_tailq_write_lock();
177
178         /* Guarantee there's no existing table with the same name. */
179         TAILQ_FOREACH(te, lpm_list, next) {
180                 i_lpm = te->data;
181                 if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
182                         break;
183         }
184
185         if (te != NULL) {
186                 rte_errno = EEXIST;
187                 goto exit;
188         }
189
190         /* Determine the amount of memory to allocate. */
191         mem_size = sizeof(*i_lpm);
192         rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
193         tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
194                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
195
196         /* allocate tailq entry */
197         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
198         if (te == NULL) {
199                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
200                 rte_errno = ENOMEM;
201                 goto exit;
202         }
203
204         /* Allocate memory to store the LPM data structures. */
205         i_lpm = rte_zmalloc_socket(mem_name, mem_size,
206                         RTE_CACHE_LINE_SIZE, socket_id);
207         if (i_lpm == NULL) {
208                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
209                 rte_free(te);
210                 rte_errno = ENOMEM;
211                 goto exit;
212         }
213
214         i_lpm->rules_tbl = rte_zmalloc_socket(NULL,
215                         (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
216
217         if (i_lpm->rules_tbl == NULL) {
218                 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
219                 rte_free(i_lpm);
220                 i_lpm = NULL;
221                 rte_free(te);
222                 rte_errno = ENOMEM;
223                 goto exit;
224         }
225
226         i_lpm->lpm.tbl8 = rte_zmalloc_socket(NULL,
227                         (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
228
229         if (i_lpm->lpm.tbl8 == NULL) {
230                 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
231                 rte_free(i_lpm->rules_tbl);
232                 rte_free(i_lpm);
233                 i_lpm = NULL;
234                 rte_free(te);
235                 rte_errno = ENOMEM;
236                 goto exit;
237         }
238
239         /* Save user arguments. */
240         i_lpm->max_rules = config->max_rules;
241         i_lpm->number_tbl8s = config->number_tbl8s;
242         strlcpy(i_lpm->name, name, sizeof(i_lpm->name));
243
244         te->data = i_lpm;
245         lpm = &i_lpm->lpm;
246
247         TAILQ_INSERT_TAIL(lpm_list, te, next);
248
249 exit:
250         rte_mcfg_tailq_write_unlock();
251
252         return lpm;
253 }
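/*
 * Usage sketch (illustrative; the sizing values are arbitrary
 * application choices, and handle_error() is a stand-in):
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *			&config);
 *	if (lpm == NULL)
 *		handle_error();	// rte_errno: EINVAL, EEXIST or ENOMEM
 */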
254
255 /*
256  * Deallocates memory for given LPM table.
257  */
258 void
259 rte_lpm_free(struct rte_lpm *lpm)
260 {
261         struct rte_lpm_list *lpm_list;
262         struct rte_tailq_entry *te;
263         struct __rte_lpm *i_lpm;
264
265         /* Check user arguments. */
266         if (lpm == NULL)
267                 return;
268         i_lpm = container_of(lpm, struct __rte_lpm, lpm);
269
270         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
271
272         rte_mcfg_tailq_write_lock();
273
274         /* find our tailq entry */
275         TAILQ_FOREACH(te, lpm_list, next) {
276                 if (te->data == (void *)i_lpm)
277                         break;
278         }
279         if (te != NULL)
280                 TAILQ_REMOVE(lpm_list, te, next);
281
282         rte_mcfg_tailq_write_unlock();
283
284         if (i_lpm->dq != NULL)
285                 rte_rcu_qsbr_dq_delete(i_lpm->dq);
286         rte_free(i_lpm->lpm.tbl8);
287         rte_free(i_lpm->rules_tbl);
288         rte_free(i_lpm);
289         rte_free(te);
290 }
291
292 static void
293 __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
294 {
295         struct rte_lpm_tbl_entry *tbl8 = ((struct __rte_lpm *)p)->lpm.tbl8;
296         struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
297         uint32_t tbl8_group_index = *(uint32_t *)data;
298
299         RTE_SET_USED(n);
300         /* Set tbl8 group invalid */
301         __atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
302                 __ATOMIC_RELAXED);
303 }
304
305 /* Associate QSBR variable with an LPM object.
306  */
307 int
308 rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
309 {
310         struct rte_rcu_qsbr_dq_parameters params = {0};
311         char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
312         struct __rte_lpm *i_lpm;
313
314         if (lpm == NULL || cfg == NULL) {
315                 rte_errno = EINVAL;
316                 return 1;
317         }
318
319         i_lpm = container_of(lpm, struct __rte_lpm, lpm);
320         if (i_lpm->v != NULL) {
321                 rte_errno = EEXIST;
322                 return 1;
323         }
324
325         if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
326                 /* Nothing else to do. */
327         } else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
328                 /* Init QSBR defer queue. */
329                 snprintf(rcu_dq_name, sizeof(rcu_dq_name),
330                                 "LPM_RCU_%s", i_lpm->name);
331                 params.name = rcu_dq_name;
332                 params.size = cfg->dq_size;
333                 if (params.size == 0)
334                         params.size = i_lpm->number_tbl8s;
335                 params.trigger_reclaim_limit = cfg->reclaim_thd;
336                 params.max_reclaim_size = cfg->reclaim_max;
337                 if (params.max_reclaim_size == 0)
338                         params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
339                 params.esize = sizeof(uint32_t);        /* tbl8 group index */
340                 params.free_fn = __lpm_rcu_qsbr_free_resource;
341                 params.p = i_lpm;
342                 params.v = cfg->v;
343                 i_lpm->dq = rte_rcu_qsbr_dq_create(&params);
344                 if (i_lpm->dq == NULL) {
345                         RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
346                         return 1;
347                 }
348         } else {
349                 rte_errno = EINVAL;
350                 return 1;
351         }
352         i_lpm->rcu_mode = cfg->mode;
353         i_lpm->v = cfg->v;
354
355         return 0;
356 }
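/*
 * Usage sketch (illustrative; the single-threaded QSBR sizing and
 * handle_error() are assumptions for the example):
 *
 *	size_t sz = rte_rcu_qsbr_get_memsize(1);
 *	struct rte_rcu_qsbr *v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	rte_rcu_qsbr_init(v, 1);
 *	struct rte_lpm_rcu_config rcu_cfg = {
 *		.v = v,
 *		.mode = RTE_LPM_QSBR_MODE_DQ,	// reclaim tbl8s via defer queue
 *	};
 *	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
 *		handle_error();	// rte_errno: EINVAL or EEXIST
 */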
357
358 /*
359  * Adds a rule to the rule table.
360  *
361  * NOTE: The rule table is split into 32 groups. Each group contains rules that
362  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
363  * prefixes with a depth of 1, etc.). In the following code (depth - 1) is
364  * used as the group index because, although the depth range is 1 - 32,
365  * groups are indexed from 0 - 31.
366  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
367  */
368 static int32_t
369 rule_add(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
370         uint32_t next_hop)
371 {
372         uint32_t rule_gindex, rule_index, last_rule;
373         int i;
374
375         VERIFY_DEPTH(depth);
376
377         /* Scan through rule group to see if rule already exists. */
378         if (i_lpm->rule_info[depth - 1].used_rules > 0) {
379
380                 /* rule_gindex stands for rule group index. */
381                 rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
382                 /* Initialise rule_index to point to start of rule group. */
383                 rule_index = rule_gindex;
384                 /* Last rule = Last used rule in this rule group. */
385                 last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;
386
387                 for (; rule_index < last_rule; rule_index++) {
388
389                         /* If rule already exists update next hop and return. */
390                         if (i_lpm->rules_tbl[rule_index].ip == ip_masked) {
391
392                                 if (i_lpm->rules_tbl[rule_index].next_hop
393                                                 == next_hop)
394                                         return -EEXIST;
395                                 i_lpm->rules_tbl[rule_index].next_hop = next_hop;
396
397                                 return rule_index;
398                         }
399                 }
400
401                 if (rule_index == i_lpm->max_rules)
402                         return -ENOSPC;
403         } else {
404                 /* Calculate the position in which the rule will be stored. */
405                 rule_index = 0;
406
407                 for (i = depth - 1; i > 0; i--) {
408                         if (i_lpm->rule_info[i - 1].used_rules > 0) {
409                                 rule_index = i_lpm->rule_info[i - 1].first_rule
410                                                 + i_lpm->rule_info[i - 1].used_rules;
411                                 break;
412                         }
413                 }
414                 if (rule_index == i_lpm->max_rules)
415                         return -ENOSPC;
416
417                 i_lpm->rule_info[depth - 1].first_rule = rule_index;
418         }
419
420         /* Make room for the new rule in the array. */
421         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
422                 if (i_lpm->rule_info[i - 1].first_rule
423                                 + i_lpm->rule_info[i - 1].used_rules == i_lpm->max_rules)
424                         return -ENOSPC;
425
426                 if (i_lpm->rule_info[i - 1].used_rules > 0) {
427                         i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule
428                                 + i_lpm->rule_info[i - 1].used_rules]
429                                         = i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule];
430                         i_lpm->rule_info[i - 1].first_rule++;
431                 }
432         }
433
434         /* Add the new rule. */
435         i_lpm->rules_tbl[rule_index].ip = ip_masked;
436         i_lpm->rules_tbl[rule_index].next_hop = next_hop;
437
438         /* Increment the used rules counter for this rule group. */
439         i_lpm->rule_info[depth - 1].used_rules++;
440
441         return rule_index;
442 }
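/*
 * Layout sketch (illustrative): rules_tbl keeps the groups contiguous
 * and ordered by depth. With rules at depths 8 and 24:
 *
 *	[depth-8 rules][depth-24 rules][free space]
 *
 * Inserting a depth-16 rule moves the first rule of each deeper group
 * (here depth 24) to the end of its own group, opening one slot at the
 * old group boundary where the new rule is stored.
 */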
443
444 /*
445  * Delete a rule from the rule table.
446  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
447  */
448 static void
449 rule_delete(struct __rte_lpm *i_lpm, int32_t rule_index, uint8_t depth)
450 {
451         int i;
452
453         VERIFY_DEPTH(depth);
454
455         i_lpm->rules_tbl[rule_index] =
456                         i_lpm->rules_tbl[i_lpm->rule_info[depth - 1].first_rule
457                         + i_lpm->rule_info[depth - 1].used_rules - 1];
458
459         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
460                 if (i_lpm->rule_info[i].used_rules > 0) {
461                         i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule - 1] =
462                                         i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule
463                                                 + i_lpm->rule_info[i].used_rules - 1];
464                         i_lpm->rule_info[i].first_rule--;
465                 }
466         }
467
468         i_lpm->rule_info[depth - 1].used_rules--;
469 }
470
471 /*
472  * Finds a rule in rule table.
473  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
474  */
475 static int32_t
476 rule_find(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth)
477 {
478         uint32_t rule_gindex, last_rule, rule_index;
479
480         VERIFY_DEPTH(depth);
481
482         rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
483         last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;
484
485         /* Scan used rules at given depth to find rule. */
486         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
487                 /* If rule is found return the rule index. */
488                 if (i_lpm->rules_tbl[rule_index].ip == ip_masked)
489                         return rule_index;
490         }
491
492         /* If rule is not found return -EINVAL. */
493         return -EINVAL;
494 }
495
496 /*
497  * Find, clean and allocate a tbl8.
498  */
499 static int32_t
500 _tbl8_alloc(struct __rte_lpm *i_lpm)
501 {
502         uint32_t group_idx; /* tbl8 group index. */
503         struct rte_lpm_tbl_entry *tbl8_entry;
504
505         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
506         for (group_idx = 0; group_idx < i_lpm->number_tbl8s; group_idx++) {
507                 tbl8_entry = &i_lpm->lpm.tbl8[group_idx *
508                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
509                 /* If a free tbl8 group is found clean it and set as VALID. */
510                 if (!tbl8_entry->valid_group) {
511                         struct rte_lpm_tbl_entry new_tbl8_entry = {
512                                 .next_hop = 0,
513                                 .valid = INVALID,
514                                 .depth = 0,
515                                 .valid_group = VALID,
516                         };
517
518                         memset(&tbl8_entry[0], 0,
519                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
520                                         sizeof(tbl8_entry[0]));
521
522                         __atomic_store(tbl8_entry, &new_tbl8_entry,
523                                         __ATOMIC_RELAXED);
524
525                         /* Return group index for allocated tbl8 group. */
526                         return group_idx;
527                 }
528         }
529
530         /* If there are no tbl8 groups free then return error. */
531         return -ENOSPC;
532 }
533
534 static int32_t
535 tbl8_alloc(struct __rte_lpm *i_lpm)
536 {
537         int32_t group_idx; /* tbl8 group index. */
538
539         group_idx = _tbl8_alloc(i_lpm);
540         if (group_idx == -ENOSPC && i_lpm->dq != NULL) {
541                 /* If there are no tbl8 groups try to reclaim one. */
542                 if (rte_rcu_qsbr_dq_reclaim(i_lpm->dq, 1,
543                                 NULL, NULL, NULL) == 0)
544                         group_idx = _tbl8_alloc(i_lpm);
545         }
546
547         return group_idx;
548 }
549
550 static int32_t
551 tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
552 {
553         struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
554         int status;
555
556         if (i_lpm->v == NULL) {
557                 /* Set tbl8 group invalid */
558                 __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
559                                 __ATOMIC_RELAXED);
560         } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
561                 /* Wait for quiescent state change. */
562                 rte_rcu_qsbr_synchronize(i_lpm->v,
563                         RTE_QSBR_THRID_INVALID);
564                 /* Set tbl8 group invalid */
565                 __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
566                                 __ATOMIC_RELAXED);
567         } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
568                 /* Push into QSBR defer queue. */
569                 status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
570                                 (void *)&tbl8_group_start);
571                 if (status == 1) {
572                         RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
573                         return -rte_errno;
574                 }
575         }
576
577         return 0;
578 }
579
580 static __rte_noinline int32_t
581 add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
582                 uint32_t next_hop)
583 {
584 #define group_idx next_hop
585         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
586
587         /* Calculate the index into Table24. */
588         tbl24_index = ip >> 8;
589         tbl24_range = depth_to_range(depth);
590
591         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
592                 /*
593                  * Overwrite the entry if it is invalid, or valid but
594                  * non-extended and no deeper than the new rule.
595                  */
596                 if (!i_lpm->lpm.tbl24[i].valid || (i_lpm->lpm.tbl24[i].valid_group == 0 &&
597                                 i_lpm->lpm.tbl24[i].depth <= depth)) {
598
599                         struct rte_lpm_tbl_entry new_tbl24_entry = {
600                                 .next_hop = next_hop,
601                                 .valid = VALID,
602                                 .valid_group = 0,
603                                 .depth = depth,
604                         };
605
606                         /* Setting tbl24 entry in one go to avoid race
607                          * conditions
608                          */
609                         __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
610                                         __ATOMIC_RELEASE);
611
612                         continue;
613                 }
614
615                 if (i_lpm->lpm.tbl24[i].valid_group == 1) {
616                         /* If tbl24 entry is valid and extended calculate the
617                          *  index into tbl8.
618                          */
619                         tbl8_index = i_lpm->lpm.tbl24[i].group_idx *
620                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
621                         tbl8_group_end = tbl8_index +
622                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
623
624                         for (j = tbl8_index; j < tbl8_group_end; j++) {
625                                 if (!i_lpm->lpm.tbl8[j].valid ||
626                                                 i_lpm->lpm.tbl8[j].depth <= depth) {
627                                         struct rte_lpm_tbl_entry
628                                                 new_tbl8_entry = {
629                                                 .valid = VALID,
630                                                 .valid_group = VALID,
631                                                 .depth = depth,
632                                                 .next_hop = next_hop,
633                                         };
634
635                                         /*
636                                          * Setting tbl8 entry in one go to avoid
637                                          * race conditions
638                                          */
639                                         __atomic_store(&i_lpm->lpm.tbl8[j],
640                                                 &new_tbl8_entry,
641                                                 __ATOMIC_RELAXED);
642
643                                         continue;
644                                 }
645                         }
646                 }
647         }
648 #undef group_idx
649         return 0;
650 }
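/*
 * Worked example (illustrative): adding 10.1.0.0/16 gives tbl24_index =
 * 0x0A010000 >> 8 = 0x0A0100 and tbl24_range = 1 << (24 - 16) = 256, so
 * tbl24 entries 0x0A0100 .. 0x0A01FF are written with the new next hop,
 * except entries owned by a deeper prefix; extended entries instead have
 * their shallower tbl8 entries updated.
 */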
651
652 static __rte_noinline int32_t
653 add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
654                 uint32_t next_hop)
655 {
656 #define group_idx next_hop
657         uint32_t tbl24_index;
658         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
659                 tbl8_range, i;
660
661         tbl24_index = (ip_masked >> 8);
662         tbl8_range = depth_to_range(depth);
663
664         if (!i_lpm->lpm.tbl24[tbl24_index].valid) {
665                 /* Search for a free tbl8 group. */
666                 tbl8_group_index = tbl8_alloc(i_lpm);
667
668                 /* Check tbl8 allocation was successful. */
669                 if (tbl8_group_index < 0) {
670                         return tbl8_group_index;
671                 }
672
673                 /* Find index into tbl8 and range. */
674                 tbl8_index = (tbl8_group_index *
675                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
676                                 (ip_masked & 0xFF);
677
678                 /* Set tbl8 entry. */
679                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
680                         struct rte_lpm_tbl_entry new_tbl8_entry = {
681                                 .valid = VALID,
682                                 .depth = depth,
683                                 .valid_group = i_lpm->lpm.tbl8[i].valid_group,
684                                 .next_hop = next_hop,
685                         };
686                         __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
687                                         __ATOMIC_RELAXED);
688                 }
689
690                 /*
691                  * Update tbl24 entry to point to new tbl8 entry. Note: The
692                  * ext_flag and tbl8_index need to be updated simultaneously,
693                  * so assign whole structure in one go
694                  */
695
696                 struct rte_lpm_tbl_entry new_tbl24_entry = {
697                         .group_idx = tbl8_group_index,
698                         .valid = VALID,
699                         .valid_group = 1,
700                         .depth = 0,
701                 };
702
703                 /* The tbl24 entry must be written only after the
704                  * tbl8 entries are written.
705                  */
706                 __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
707                                 __ATOMIC_RELEASE);
708
709         } /* If valid entry but not extended calculate the index into Table8. */
710         else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
711                 /* Search for free tbl8 group. */
712                 tbl8_group_index = tbl8_alloc(i_lpm);
713
714                 if (tbl8_group_index < 0) {
715                         return tbl8_group_index;
716                 }
717
718                 tbl8_group_start = tbl8_group_index *
719                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
720                 tbl8_group_end = tbl8_group_start +
721                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
722
723                 /* Populate new tbl8 with tbl24 value. */
724                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
725                         struct rte_lpm_tbl_entry new_tbl8_entry = {
726                                 .valid = VALID,
727                                 .depth = i_lpm->lpm.tbl24[tbl24_index].depth,
728                                 .valid_group = i_lpm->lpm.tbl8[i].valid_group,
729                                 .next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
730                         };
731                         __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
732                                         __ATOMIC_RELAXED);
733                 }
734
735                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
736
737                 /* Insert new rule into the tbl8 entry. */
738                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
739                         struct rte_lpm_tbl_entry new_tbl8_entry = {
740                                 .valid = VALID,
741                                 .depth = depth,
742                                 .valid_group = i_lpm->lpm.tbl8[i].valid_group,
743                                 .next_hop = next_hop,
744                         };
745                         __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
746                                         __ATOMIC_RELAXED);
747                 }
748
749                 /*
750                  * Update tbl24 entry to point to new tbl8 entry. Note: The
751                  * ext_flag and tbl8_index need to be updated simultaneously,
752                  * so assign whole structure in one go.
753                  */
754
755                 struct rte_lpm_tbl_entry new_tbl24_entry = {
756                                 .group_idx = tbl8_group_index,
757                                 .valid = VALID,
758                                 .valid_group = 1,
759                                 .depth = 0,
760                 };
761
762                 /* The tbl24 entry must be written only after the
763                  * tbl8 entries are written.
764                  */
765                 __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
766                                 __ATOMIC_RELEASE);
767
768         } else { /*
769                 * If the entry is valid and extended, calculate the index into tbl8.
770                 */
771                 tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
772                 tbl8_group_start = tbl8_group_index *
773                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
774                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
775
776                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
777
778                         if (!i_lpm->lpm.tbl8[i].valid ||
779                                         i_lpm->lpm.tbl8[i].depth <= depth) {
780                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
781                                         .valid = VALID,
782                                         .depth = depth,
783                                         .next_hop = next_hop,
784                                         .valid_group = i_lpm->lpm.tbl8[i].valid_group,
785                                 };
786
787                                 /*
788                                  * Setting tbl8 entry in one go to avoid race
789                                  * condition
790                                  */
791                                 __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
792                                                 __ATOMIC_RELAXED);
793
794                                 continue;
795                         }
796                 }
797         }
798 #undef group_idx
799         return 0;
800 }
801
802 /*
803  * Add a route
804  */
805 int
806 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
807                 uint32_t next_hop)
808 {
809         int32_t rule_index, status = 0;
810         struct __rte_lpm *i_lpm;
811         uint32_t ip_masked;
812
813         /* Check user arguments. */
814         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
815                 return -EINVAL;
816
817         i_lpm = container_of(lpm, struct __rte_lpm, lpm);
818         ip_masked = ip & depth_to_mask(depth);
819
820         /* Add the rule to the rule table. */
821         rule_index = rule_add(i_lpm, ip_masked, depth, next_hop);
822
823         /* Skip the table entries update if the rule is the same as
824          * the one already in the rules table.
825          */
826         if (rule_index == -EEXIST)
827                 return 0;
828
829         /* If there is no space available for the new rule, return an error. */
830         if (rule_index < 0) {
831                 return rule_index;
832         }
833
834         if (depth <= MAX_DEPTH_TBL24) {
835                 status = add_depth_small(i_lpm, ip_masked, depth, next_hop);
836         } else { /* If depth > MAX_DEPTH_TBL24 */
837                 status = add_depth_big(i_lpm, ip_masked, depth, next_hop);
838
839                 /*
840                  * If add fails due to exhaustion of tbl8 extensions delete
841                  * rule that was added to rule table.
842                  */
843                 if (status < 0) {
844                         rule_delete(i_lpm, rule_index, depth);
845
846                         return status;
847                 }
848         }
849
850         return 0;
851 }
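/*
 * Usage sketch (illustrative; the prefix and next-hop id are arbitrary
 * example values, RTE_IPV4() assumes rte_ip.h is included, and
 * handle_error() is a stand-in):
 *
 *	uint32_t ip = RTE_IPV4(192, 168, 0, 0);
 *	int ret = rte_lpm_add(lpm, ip, 16, 5);	// 192.168.0.0/16 -> nh 5
 *	if (ret < 0)
 *		handle_error();	// -EINVAL or -ENOSPC
 */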
852
853 /*
854  * Look for a rule in the high-level rules table
855  */
856 int
857 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
858 uint32_t *next_hop)
859 {
860         struct __rte_lpm *i_lpm;
861         uint32_t ip_masked;
862         int32_t rule_index;
863
864         /* Check user arguments. */
865         if ((lpm == NULL) ||
866                 (next_hop == NULL) ||
867                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
868                 return -EINVAL;
869
870         /* Look for the rule using rule_find. */
871         i_lpm = container_of(lpm, struct __rte_lpm, lpm);
872         ip_masked = ip & depth_to_mask(depth);
873         rule_index = rule_find(i_lpm, ip_masked, depth);
874
875         if (rule_index >= 0) {
876                 *next_hop = i_lpm->rules_tbl[rule_index].next_hop;
877                 return 1;
878         }
879
880         /* If rule is not found return 0. */
881         return 0;
882 }
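/*
 * Usage sketch (illustrative): query the exact rule added above; this
 * is an exact prefix match, not a longest-prefix lookup.
 *
 *	uint32_t nh;
 *	if (rte_lpm_is_rule_present(lpm, ip, 16, &nh) == 1)
 *		printf("next hop %u\n", nh);	// prints 5 for the example
 */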
883
884 static int32_t
885 find_previous_rule(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
886                 uint8_t *sub_rule_depth)
887 {
888         int32_t rule_index;
889         uint32_t ip_masked;
890         uint8_t prev_depth;
891
892         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
893                 ip_masked = ip & depth_to_mask(prev_depth);
894
895                 rule_index = rule_find(i_lpm, ip_masked, prev_depth);
896
897                 if (rule_index >= 0) {
898                         *sub_rule_depth = prev_depth;
899                         return rule_index;
900                 }
901         }
902
903         return -1;
904 }
905
906 static int32_t
907 delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
908         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
909 {
910 #define group_idx next_hop
911         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
912
913         /* Calculate the range and index into Table24. */
914         tbl24_range = depth_to_range(depth);
915         tbl24_index = (ip_masked >> 8);
916         struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
917
918         /*
919          * First check sub_rule_index. A value of -1 indicates that no
920          * replacement rule exists; a value >= 0 is the replacement rule's index.
921          */
922         if (sub_rule_index < 0) {
923                 /*
924                  * If no replacement rule exists then invalidate entries
925                  * associated with this rule.
926                  */
927                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
928
929                         if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
930                                         i_lpm->lpm.tbl24[i].depth <= depth) {
931                                 __atomic_store(&i_lpm->lpm.tbl24[i],
932                                         &zero_tbl24_entry, __ATOMIC_RELEASE);
933                         } else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
934                                 /*
935                                  * If TBL24 entry is extended, then there has
936                                  * to be a rule with depth >= 25 in the
937                                  * associated TBL8 group.
938                                  */
939
940                                 tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
941                                 tbl8_index = tbl8_group_index *
942                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
943
944                                 for (j = tbl8_index; j < (tbl8_index +
945                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
946
947                                         if (i_lpm->lpm.tbl8[j].depth <= depth)
948                                                 i_lpm->lpm.tbl8[j].valid = INVALID;
949                                 }
950                         }
951                 }
952         } else {
953                 /*
954                  * If a replacement rule exists then modify entries
955                  * associated with this rule.
956                  */
957
958                 struct rte_lpm_tbl_entry new_tbl24_entry = {
959                         .next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
960                         .valid = VALID,
961                         .valid_group = 0,
962                         .depth = sub_rule_depth,
963                 };
964
965                 struct rte_lpm_tbl_entry new_tbl8_entry = {
966                         .valid = VALID,
967                         .valid_group = VALID,
968                         .depth = sub_rule_depth,
969                         .next_hop = i_lpm->rules_tbl
970                         [sub_rule_index].next_hop,
971                 };
972
973                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
974
975                         if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
976                                         i_lpm->lpm.tbl24[i].depth <= depth) {
977                                 __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
978                                                 __ATOMIC_RELEASE);
979                         } else  if (i_lpm->lpm.tbl24[i].valid_group == 1) {
980                                 /*
981                                  * If TBL24 entry is extended, then there has
982                                  * to be a rule with depth >= 25 in the
983                                  * associated TBL8 group.
984                                  */
985
986                                 tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
987                                 tbl8_index = tbl8_group_index *
988                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
989
990                                 for (j = tbl8_index; j < (tbl8_index +
991                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
992
993                                         if (i_lpm->lpm.tbl8[j].depth <= depth)
994                                                 __atomic_store(&i_lpm->lpm.tbl8[j],
995                                                         &new_tbl8_entry,
996                                                         __ATOMIC_RELAXED);
997                                 }
998                         }
999                 }
1000         }
1001 #undef group_idx
1002         return 0;
1003 }
1004
1005 /*
1006  * Checks if table 8 group can be recycled.
1007  *
1008  * A return of -EEXIST means the tbl8 is in use and thus cannot be recycled.
1009  * A return of -EINVAL means the tbl8 is empty and thus can be recycled.
1010  * A return value >= 0 means the tbl8 is in use but all its entries hold the
1011  * same value, so it can be recycled into a single tbl24 entry.
1012  */
1013 static int32_t
1014 tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
1015                 uint32_t tbl8_group_start)
1016 {
1017         uint32_t tbl8_group_end, i;
1018         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1019
1020         /*
1021          * Check the first entry of the given tbl8. If it is invalid we know
1022          * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24
1023          * (such a rule would cover every entry in the tbl8) and thus the
1024          * group cannot be collapsed into a single tbl24 entry.
1025          */
1026         if (tbl8[tbl8_group_start].valid) {
1027                 /*
1028                  * If the first entry is valid, check if its depth is at most 24
1029                  * and if so check the rest of the entries to verify that they
1030                  * are all of this depth.
1031                  */
1032                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1033                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1034                                         i++) {
1035
1036                                 if (tbl8[i].depth !=
1037                                                 tbl8[tbl8_group_start].depth) {
1038
1039                                         return -EEXIST;
1040                                 }
1041                         }
1042                         /* If all entries are the same return the tbl8 index */
1043                         return tbl8_group_start;
1044                 }
1045
1046                 return -EEXIST;
1047         }
1048         /*
1049          * If the first entry is invalid check if the rest of the entries in
1050          * the tbl8 are invalid.
1051          */
1052         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1053                 if (tbl8[i].valid)
1054                         return -EEXIST;
1055         }
1056         /* If no valid entries are found then return -EINVAL. */
1057         return -EINVAL;
1058 }
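/*
 * Example (illustrative): after deleting a /25 rule whose /24
 * replacement fills all 256 entries of the group with a single depth,
 * the check returns the group start index and delete_depth_big()
 * collapses the group back into one non-extended tbl24 entry.
 */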
1059
1060 static int32_t
1061 delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
1062         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1063 {
1064 #define group_idx next_hop
1065         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1066                         tbl8_range, i;
1067         int32_t tbl8_recycle_index, status = 0;
1068
1069         /*
1070          * Calculate the index into tbl24 and range. Note: All depths larger
1071          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1072          */
1073         tbl24_index = ip_masked >> 8;
1074
1075         /* Calculate the index into tbl8 and range. */
1076         tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
1077         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1078         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1079         tbl8_range = depth_to_range(depth);
1080
1081         if (sub_rule_index < 0) {
1082                 /*
1083                  * Loop through the range of entries on tbl8 for which the
1084                  * rule_to_delete must be removed or modified.
1085                  */
1086                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1087                         if (i_lpm->lpm.tbl8[i].depth <= depth)
1088                                 i_lpm->lpm.tbl8[i].valid = INVALID;
1089                 }
1090         } else {
1091                 /* Set new tbl8 entry. */
1092                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1093                         .valid = VALID,
1094                         .depth = sub_rule_depth,
1095                         .valid_group = i_lpm->lpm.tbl8[tbl8_group_start].valid_group,
1096                         .next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
1097                 };
1098
1099                 /*
1100                  * Loop through the range of entries on tbl8 for which the
1101                  * rule_to_delete must be modified.
1102                  */
1103                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1104                         if (i_lpm->lpm.tbl8[i].depth <= depth)
1105                                 __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
1106                                                 __ATOMIC_RELAXED);
1107                 }
1108         }
1109
1110         /*
1111          * Check if there are any valid entries in this tbl8 group. If all
1112          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1113          * associated tbl24 entry.
1114          */
1115
1116         tbl8_recycle_index = tbl8_recycle_check(i_lpm->lpm.tbl8, tbl8_group_start);
1117
1118         if (tbl8_recycle_index == -EINVAL) {
1119                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1120                  * Prevent the free of the tbl8 group from hoisting.
1121                  */
1122                 i_lpm->lpm.tbl24[tbl24_index].valid = 0;
1123                 __atomic_thread_fence(__ATOMIC_RELEASE);
1124                 status = tbl8_free(i_lpm, tbl8_group_start);
1125         } else if (tbl8_recycle_index > -1) {
1126                 /* Update tbl24 entry. */
1127                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1128                         .next_hop = i_lpm->lpm.tbl8[tbl8_recycle_index].next_hop,
1129                         .valid = VALID,
1130                         .valid_group = 0,
1131                         .depth = i_lpm->lpm.tbl8[tbl8_recycle_index].depth,
1132                 };
1133
1134                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1135                  * Prevent the free of the tbl8 group from hoisting.
1136                  */
1137                 __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
1138                                 __ATOMIC_RELAXED);
1139                 __atomic_thread_fence(__ATOMIC_RELEASE);
1140                 status = tbl8_free(i_lpm, tbl8_group_start);
1141         }
1142 #undef group_idx
1143         return status;
1144 }
1145
1146 /*
1147  * Deletes a rule
1148  */
1149 int
1150 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1151 {
1152         int32_t rule_to_delete_index, sub_rule_index;
1153         struct __rte_lpm *i_lpm;
1154         uint32_t ip_masked;
1155         uint8_t sub_rule_depth;
1156         /*
1157          * Check input arguments. Note: ip is an unsigned 32-bit integer,
1158          * so its value needs no range check.
1159          */
1160         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1161                 return -EINVAL;
1162         }
1163
1164         i_lpm = container_of(lpm, struct __rte_lpm, lpm);
1165         ip_masked = ip & depth_to_mask(depth);
1166
1167         /*
1168          * Find the index of the input rule, that needs to be deleted, in the
1169          * rule table.
1170          */
1171         rule_to_delete_index = rule_find(i_lpm, ip_masked, depth);
1172
1173         /*
1174          * Check if rule_to_delete_index was found. If no rule was found the
1175          * function rule_find returns -EINVAL.
1176          */
1177         if (rule_to_delete_index < 0)
1178                 return -EINVAL;
1179
1180         /* Delete the rule from the rule table. */
1181         rule_delete(i_lpm, rule_to_delete_index, depth);
1182
1183         /*
1184          * Find rule to replace the rule_to_delete. If there is no rule to
1185          * replace the rule_to_delete we return -1 and invalidate the table
1186          * entries associated with this rule.
1187          */
1188         sub_rule_depth = 0;
1189         sub_rule_index = find_previous_rule(i_lpm, ip, depth, &sub_rule_depth);
1190
1191         /*
1192          * If the input depth value is less than 25 use function
1193          * delete_depth_small otherwise use delete_depth_big.
1194          */
1195         if (depth <= MAX_DEPTH_TBL24) {
1196                 return delete_depth_small(i_lpm, ip_masked, depth,
1197                                 sub_rule_index, sub_rule_depth);
1198         } else { /* If depth > MAX_DEPTH_TBL24 */
1199                 return delete_depth_big(i_lpm, ip_masked, depth, sub_rule_index,
1200                                 sub_rule_depth);
1201         }
1202 }
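/*
 * Usage sketch (illustrative; handle_error() is a stand-in): deleting
 * the example route; any shorter covering prefix automatically takes
 * over the freed table entries.
 *
 *	if (rte_lpm_delete(lpm, RTE_IPV4(192, 168, 0, 0), 16) < 0)
 *		handle_error();	// -EINVAL: bad args or rule not found
 */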
1203
1204 /*
1205  * Delete all rules from the LPM table.
1206  */
1207 void
1208 rte_lpm_delete_all(struct rte_lpm *lpm)
1209 {
1210         struct __rte_lpm *i_lpm;
1211
1212         i_lpm = container_of(lpm, struct __rte_lpm, lpm);
1213         /* Zero rule information. */
1214         memset(i_lpm->rule_info, 0, sizeof(i_lpm->rule_info));
1215
1216         /* Zero tbl24. */
1217         memset(i_lpm->lpm.tbl24, 0, sizeof(i_lpm->lpm.tbl24));
1218
1219         /* Zero tbl8. */
1220         memset(i_lpm->lpm.tbl8, 0, sizeof(i_lpm->lpm.tbl8[0])
1221                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * i_lpm->number_tbl8s);
1222
1223         /* Delete all rules from the rules table. */
1224         memset(i_lpm->rules_tbl, 0, sizeof(i_lpm->rules_tbl[0]) * i_lpm->max_rules);
1225 }