lib/librte_lpm/rte_lpm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
        .name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
        INVALID = 0,
        VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
        if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
                rte_panic("LPM: Invalid depth (%u) at line %d", \
                                (unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)          : range = 1 - 32
 * mask   (OUT)         : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
        VERIFY_DEPTH(depth);

        /* To calculate a mask start with a 1 on the left hand side and right
         * shift while populating the left hand side with 1's
         */
        return (int)0x80000000 >> (depth - 1);
}
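
/*
 * Example mask values produced above (this relies on >> of a negative
 * signed value being an arithmetic shift, which is implementation-defined
 * in C but holds for GCC/Clang on the platforms DPDK targets):
 *
 *      depth  8 -> 0xFF000000
 *      depth 24 -> 0xFFFFFF00
 *      depth 32 -> 0xFFFFFFFF
 */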

/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
        VERIFY_DEPTH(depth);

        /*
         * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
         */
        if (depth <= MAX_DEPTH_TBL24)
                return 1 << (MAX_DEPTH_TBL24 - depth);

        /* Else if depth is greater than 24 */
        return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
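
/*
 * Example range values: a /16 prefix spans 1 << (24 - 16) = 256 tbl24
 * entries, a /24 spans exactly one tbl24 entry, and a /25 spans
 * 1 << (32 - 25) = 128 entries within a tbl8 group.
 */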

/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
        struct rte_lpm_v20 *l = NULL;
        struct rte_tailq_entry *te;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, lpm_list, next) {
                l = (struct rte_lpm_v20 *) te->data;
                if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);

struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
        struct rte_lpm *l = NULL;
        struct rte_tailq_entry *te;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, lpm_list, next) {
                l = (struct rte_lpm *) te->data;
                if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
                rte_lpm_find_existing_v1604);
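
/*
 * Usage sketch (illustrative only; the table name is hypothetical): a
 * caller that did not create the table can attach to it by name, e.g.
 *
 *      struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *      if (lpm == NULL && rte_errno == ENOENT)
 *              ; // no LPM table named "example_lpm" exists
 */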

/*
 * Allocates memory for LPM object
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
                __rte_unused int flags)
{
        char mem_name[RTE_LPM_NAMESIZE];
        struct rte_lpm_v20 *lpm = NULL;
        struct rte_tailq_entry *te;
        uint32_t mem_size;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

        /* Check user arguments. */
        if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

        /* Determine the amount of memory to allocate. */
        mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* Guarantee there's no existing entry with the same name. */
        TAILQ_FOREACH(te, lpm_list, next) {
                lpm = (struct rte_lpm_v20 *) te->data;
                if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        lpm = NULL;
        if (te != NULL) {
                rte_errno = EEXIST;
                goto exit;
        }

        /* Allocate tailq entry. */
        te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
                rte_errno = ENOMEM;
                goto exit;
        }

        /* Allocate memory to store the LPM data structures. */
        lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (lpm == NULL) {
                RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
                rte_free(te);
                rte_errno = ENOMEM;
                goto exit;
        }

        /* Save user arguments. */
        lpm->max_rules = max_rules;
        snprintf(lpm->name, sizeof(lpm->name), "%s", name);

        te->data = (void *) lpm;

        TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);

struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
                const struct rte_lpm_config *config)
{
        char mem_name[RTE_LPM_NAMESIZE];
        struct rte_lpm *lpm = NULL;
        struct rte_tailq_entry *te;
        uint32_t mem_size, rules_size, tbl8s_size;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

        /* Check user arguments. */
        if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
                        || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

        /* Determine the amount of memory to allocate. */
        mem_size = sizeof(*lpm);
        rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
        tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* Guarantee there's no existing entry with the same name. */
        TAILQ_FOREACH(te, lpm_list, next) {
                lpm = (struct rte_lpm *) te->data;
                if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        lpm = NULL;
        if (te != NULL) {
                rte_errno = EEXIST;
                goto exit;
        }

        /* Allocate tailq entry. */
        te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
                rte_errno = ENOMEM;
                goto exit;
        }

        /* Allocate memory to store the LPM data structures. */
        lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (lpm == NULL) {
                RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
                rte_free(te);
                rte_errno = ENOMEM;
                goto exit;
        }

        lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
                        (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

        if (lpm->rules_tbl == NULL) {
                RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
                rte_free(lpm);
                lpm = NULL;
                rte_free(te);
                rte_errno = ENOMEM;
                goto exit;
        }

        lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
                        (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

        if (lpm->tbl8 == NULL) {
                RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
                rte_free(lpm->rules_tbl);
                rte_free(lpm);
                lpm = NULL;
                rte_free(te);
                rte_errno = ENOMEM;
                goto exit;
        }

        /* Save user arguments. */
        lpm->max_rules = config->max_rules;
        lpm->number_tbl8s = config->number_tbl8s;
        snprintf(lpm->name, sizeof(lpm->name), "%s", name);

        te->data = (void *) lpm;

        TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
        struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
                        const struct rte_lpm_config *config), rte_lpm_create_v1604);
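
/*
 * Usage sketch (illustrative sizes; the table name is hypothetical):
 * creating a table with the 16.04 configuration structure.
 *
 *      struct rte_lpm_config config = {
 *              .max_rules = 1024,
 *              .number_tbl8s = 256,
 *              .flags = 0,
 *      };
 *      struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *                      &config);
 *      if (lpm == NULL)
 *              ; // inspect rte_errno: EINVAL, EEXIST or ENOMEM
 */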

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
        struct rte_lpm_list *lpm_list;
        struct rte_tailq_entry *te;

        /* Check user arguments. */
        if (lpm == NULL)
                return;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find our tailq entry */
        TAILQ_FOREACH(te, lpm_list, next) {
                if (te->data == (void *) lpm)
                        break;
        }
        if (te != NULL)
                TAILQ_REMOVE(lpm_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_free(lpm);
        rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);

void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
        struct rte_lpm_list *lpm_list;
        struct rte_tailq_entry *te;

        /* Check user arguments. */
        if (lpm == NULL)
                return;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find our tailq entry */
        TAILQ_FOREACH(te, lpm_list, next) {
                if (te->data == (void *) lpm)
                        break;
        }
        if (te != NULL)
                TAILQ_REMOVE(lpm_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_free(lpm->tbl8);
        rte_free(lpm->rules_tbl);
        rte_free(lpm);
        rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
                rte_lpm_free_v1604);

/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * as the group index because, although the depth range is 1 - 32, groups are
 * indexed in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
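
/*
 * Illustration of the rule table layout: rule groups are stored
 * contiguously in ascending depth order. With rules at depths 8, 16 and
 * 24 the table looks like:
 *
 *      rules_tbl: [ /8 group ][ /16 group ][ /24 group ]
 *
 * rule_info[d - 1].first_rule gives the start of the depth-d group, and
 * inserting into a group moves one rule from the front of each deeper
 * group to its end, sliding those groups up by one slot.
 */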
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
        uint8_t next_hop)
{
        uint32_t rule_gindex, rule_index, last_rule;
        int i;

        VERIFY_DEPTH(depth);

        /* Scan through rule group to see if rule already exists. */
        if (lpm->rule_info[depth - 1].used_rules > 0) {

                /* rule_gindex stands for rule group index. */
                rule_gindex = lpm->rule_info[depth - 1].first_rule;
                /* Initialise rule_index to point to start of rule group. */
                rule_index = rule_gindex;
                /* Last rule = Last used rule in this rule group. */
                last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

                for (; rule_index < last_rule; rule_index++) {

                        /* If rule already exists update its next_hop and return. */
                        if (lpm->rules_tbl[rule_index].ip == ip_masked) {
                                lpm->rules_tbl[rule_index].next_hop = next_hop;

                                return rule_index;
                        }
                }

                if (rule_index == lpm->max_rules)
                        return -ENOSPC;
        } else {
                /* Calculate the position in which the rule will be stored. */
                rule_index = 0;

                for (i = depth - 1; i > 0; i--) {
                        if (lpm->rule_info[i - 1].used_rules > 0) {
                                rule_index = lpm->rule_info[i - 1].first_rule
                                                + lpm->rule_info[i - 1].used_rules;
                                break;
                        }
                }
                if (rule_index == lpm->max_rules)
                        return -ENOSPC;

                lpm->rule_info[depth - 1].first_rule = rule_index;
        }

        /* Make room for the new rule in the array. */
        for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
                if (lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
                        return -ENOSPC;

                if (lpm->rule_info[i - 1].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules]
                                        = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
                        lpm->rule_info[i - 1].first_rule++;
                }
        }

        /* Add the new rule. */
        lpm->rules_tbl[rule_index].ip = ip_masked;
        lpm->rules_tbl[rule_index].next_hop = next_hop;

        /* Increment the used rules counter for this rule group. */
        lpm->rule_info[depth - 1].used_rules++;

        return rule_index;
}

static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        uint32_t next_hop)
{
        uint32_t rule_gindex, rule_index, last_rule;
        int i;

        VERIFY_DEPTH(depth);

        /* Scan through rule group to see if rule already exists. */
        if (lpm->rule_info[depth - 1].used_rules > 0) {

                /* rule_gindex stands for rule group index. */
                rule_gindex = lpm->rule_info[depth - 1].first_rule;
                /* Initialise rule_index to point to start of rule group. */
                rule_index = rule_gindex;
                /* Last rule = Last used rule in this rule group. */
                last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

                for (; rule_index < last_rule; rule_index++) {

                        /* If rule already exists update its next_hop and return. */
                        if (lpm->rules_tbl[rule_index].ip == ip_masked) {
                                lpm->rules_tbl[rule_index].next_hop = next_hop;

                                return rule_index;
                        }
                }

                if (rule_index == lpm->max_rules)
                        return -ENOSPC;
        } else {
                /* Calculate the position in which the rule will be stored. */
                rule_index = 0;

                for (i = depth - 1; i > 0; i--) {
                        if (lpm->rule_info[i - 1].used_rules > 0) {
                                rule_index = lpm->rule_info[i - 1].first_rule
                                                + lpm->rule_info[i - 1].used_rules;
                                break;
                        }
                }
                if (rule_index == lpm->max_rules)
                        return -ENOSPC;

                lpm->rule_info[depth - 1].first_rule = rule_index;
        }

        /* Make room for the new rule in the array. */
        for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
                if (lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
                        return -ENOSPC;

                if (lpm->rule_info[i - 1].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules]
                                        = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
                        lpm->rule_info[i - 1].first_rule++;
                }
        }

        /* Add the new rule. */
        lpm->rules_tbl[rule_index].ip = ip_masked;
        lpm->rules_tbl[rule_index].next_hop = next_hop;

        /* Increment the used rules counter for this rule group. */
        lpm->rule_info[depth - 1].used_rules++;

        return rule_index;
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
        int i;

        VERIFY_DEPTH(depth);

        lpm->rules_tbl[rule_index] =
                        lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
                                + lpm->rule_info[depth - 1].used_rules - 1];

        for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
                if (lpm->rule_info[i].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
                                lpm->rules_tbl[lpm->rule_info[i].first_rule
                                        + lpm->rule_info[i].used_rules - 1];
                        lpm->rule_info[i].first_rule--;
                }
        }

        lpm->rule_info[depth - 1].used_rules--;
}

static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
        int i;

        VERIFY_DEPTH(depth);

        lpm->rules_tbl[rule_index] =
                        lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
                        + lpm->rule_info[depth - 1].used_rules - 1];

        for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
                if (lpm->rule_info[i].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
                                        lpm->rules_tbl[lpm->rule_info[i].first_rule
                                                + lpm->rule_info[i].used_rules - 1];
                        lpm->rule_info[i].first_rule--;
                }
        }

        lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
        uint32_t rule_gindex, last_rule, rule_index;

        VERIFY_DEPTH(depth);

        rule_gindex = lpm->rule_info[depth - 1].first_rule;
        last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

        /* Scan used rules at given depth to find rule. */
        for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
                /* If rule is found return the rule index. */
                if (lpm->rules_tbl[rule_index].ip == ip_masked)
                        return rule_index;
        }

        /* If rule is not found return -EINVAL. */
        return -EINVAL;
}

static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
        uint32_t rule_gindex, last_rule, rule_index;

        VERIFY_DEPTH(depth);

        rule_gindex = lpm->rule_info[depth - 1].first_rule;
        last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

        /* Scan used rules at given depth to find rule. */
        for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
                /* If rule is found return the rule index. */
                if (lpm->rules_tbl[rule_index].ip == ip_masked)
                        return rule_index;
        }

        /* If rule is not found return -EINVAL. */
        return -EINVAL;
}

/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
        uint32_t group_idx; /* tbl8 group index. */
        struct rte_lpm_tbl_entry_v20 *tbl8_entry;

        /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
        for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
                        group_idx++) {
                tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
                /* If a free tbl8 group is found clean it and set as VALID. */
                if (!tbl8_entry->valid_group) {
                        memset(&tbl8_entry[0], 0,
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                                        sizeof(tbl8_entry[0]));

                        tbl8_entry->valid_group = VALID;

                        /* Return group index for allocated tbl8 group. */
                        return group_idx;
                }
        }

        /* If there are no tbl8 groups free then return error. */
        return -ENOSPC;
}

static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
        uint32_t group_idx; /* tbl8 group index. */
        struct rte_lpm_tbl_entry *tbl8_entry;

        /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
        for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
                tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
                /* If a free tbl8 group is found clean it and set as VALID. */
                if (!tbl8_entry->valid_group) {
                        memset(&tbl8_entry[0], 0,
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                                        sizeof(tbl8_entry[0]));

                        tbl8_entry->valid_group = VALID;

                        /* Return group index for allocated tbl8 group. */
                        return group_idx;
                }
        }

        /* If there are no tbl8 groups free then return error. */
        return -ENOSPC;
}

static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
        /* Set tbl8 group invalid. */
        tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
        /* Set tbl8 group invalid. */
        tbl8[tbl8_group_start].valid_group = INVALID;
}
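
/*
 * Note on cost: tbl8_alloc() is a linear scan over the tbl8 groups (the
 * valid_group flag of a group's first entry marks the whole group as
 * in-use), so allocation is O(number of groups); tbl8_free() only clears
 * that flag and is O(1).
 */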

static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t next_hop)
{
        uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

        /* Calculate the index into Table24. */
        tbl24_index = ip >> 8;
        tbl24_range = depth_to_range(depth);

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
                /*
                 * For invalid OR valid and non-extended tbl 24 entries set
                 * entry.
                 */
                if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                                lpm->tbl24[i].depth <= depth)) {

                        struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                                .valid = VALID,
                                .valid_group = 0,
                                .depth = depth,
                        };
                        new_tbl24_entry.next_hop = next_hop;

                        /* Setting tbl24 entry in one go to avoid race
                         * conditions
                         */
                        lpm->tbl24[i] = new_tbl24_entry;

                        continue;
                }

                if (lpm->tbl24[i].valid_group == 1) {
                        /* If tbl24 entry is valid and extended calculate the
                         * index into tbl8.
                         */
                        tbl8_index = lpm->tbl24[i].group_idx *
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                        tbl8_group_end = tbl8_index +
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                        for (j = tbl8_index; j < tbl8_group_end; j++) {
                                if (!lpm->tbl8[j].valid ||
                                                lpm->tbl8[j].depth <= depth) {
                                        struct rte_lpm_tbl_entry_v20
                                                new_tbl8_entry = {
                                                .valid = VALID,
                                                .valid_group = VALID,
                                                .depth = depth,
                                        };
                                        new_tbl8_entry.next_hop = next_hop;

                                        /*
                                         * Setting tbl8 entry in one go to avoid
                                         * race conditions
                                         */
                                        lpm->tbl8[j] = new_tbl8_entry;

                                        continue;
                                }
                        }
                }
        }

        return 0;
}

static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                uint32_t next_hop)
{
#define group_idx next_hop
        uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

        /* Calculate the index into Table24. */
        tbl24_index = ip >> 8;
        tbl24_range = depth_to_range(depth);

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
                /*
                 * For invalid OR valid and non-extended tbl 24 entries set
                 * entry.
                 */
                if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                                lpm->tbl24[i].depth <= depth)) {

                        struct rte_lpm_tbl_entry new_tbl24_entry = {
                                .next_hop = next_hop,
                                .valid = VALID,
                                .valid_group = 0,
                                .depth = depth,
                        };

                        /* Setting tbl24 entry in one go to avoid race
                         * conditions
                         */
                        lpm->tbl24[i] = new_tbl24_entry;

                        continue;
                }

                if (lpm->tbl24[i].valid_group == 1) {
                        /* If tbl24 entry is valid and extended calculate the
                         * index into tbl8.
                         */
                        tbl8_index = lpm->tbl24[i].group_idx *
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                        tbl8_group_end = tbl8_index +
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                        for (j = tbl8_index; j < tbl8_group_end; j++) {
                                if (!lpm->tbl8[j].valid ||
                                                lpm->tbl8[j].depth <= depth) {
                                        struct rte_lpm_tbl_entry
                                                new_tbl8_entry = {
                                                .valid = VALID,
                                                .valid_group = VALID,
                                                .depth = depth,
                                                .next_hop = next_hop,
                                        };

                                        /*
                                         * Setting tbl8 entry in one go to avoid
                                         * race conditions
                                         */
                                        lpm->tbl8[j] = new_tbl8_entry;

                                        continue;
                                }
                        }
                }
        }
#undef group_idx
        return 0;
}
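
/*
 * Worked example for the small-depth path: adding 10.0.0.0/8 writes
 * depth_to_range(8) = 1 << (24 - 8) = 65536 consecutive tbl24 entries
 * starting at index 10 << 16; already-extended entries are not
 * overwritten directly, their tbl8 groups are updated entry by entry
 * instead.
 */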

static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
                uint8_t next_hop)
{
        uint32_t tbl24_index;
        int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
                tbl8_range, i;

        tbl24_index = (ip_masked >> 8);
        tbl8_range = depth_to_range(depth);

        if (!lpm->tbl24[tbl24_index].valid) {
                /* Search for a free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

                /* Check tbl8 allocation was successful. */
                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                /* Find index into tbl8 and range. */
                tbl8_index = (tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                                (ip_masked & 0xFF);

                /* Set tbl8 entry. */
                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                        lpm->tbl8[i].valid = VALID;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go
                 */

                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                        { .group_idx = (uint8_t)tbl8_group_index, },
                        .valid = VALID,
                        .valid_group = 1,
                        .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } /* If valid entry but not extended calculate the index into Table8. */
        else if (lpm->tbl24[tbl24_index].valid_group == 0) {
                /* Search for free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_group_end = tbl8_group_start +
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                /* Populate new tbl8 with tbl24 value. */
                for (i = tbl8_group_start; i < tbl8_group_end; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
                        lpm->tbl8[i].next_hop =
                                        lpm->tbl24[tbl24_index].next_hop;
                }

                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                /* Insert new rule into the tbl8 entry. */
                for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go.
                 */

                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                                { .group_idx = (uint8_t)tbl8_group_index, },
                                .valid = VALID,
                                .valid_group = 1,
                                .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } else { /*
                * If it is valid, extended entry calculate the index into tbl8.
                */
                tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

                        if (!lpm->tbl8[i].valid ||
                                        lpm->tbl8[i].depth <= depth) {
                                struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
                                        .valid = VALID,
                                        .depth = depth,
                                        .valid_group = lpm->tbl8[i].valid_group,
                                };
                                new_tbl8_entry.next_hop = next_hop;
                                /*
                                 * Setting tbl8 entry in one go to avoid race
                                 * condition
                                 */
                                lpm->tbl8[i] = new_tbl8_entry;

                                continue;
                        }
                }
        }

        return 0;
}

static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                uint32_t next_hop)
{
#define group_idx next_hop
        uint32_t tbl24_index;
        int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
                tbl8_range, i;

        tbl24_index = (ip_masked >> 8);
        tbl8_range = depth_to_range(depth);

        if (!lpm->tbl24[tbl24_index].valid) {
                /* Search for a free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

                /* Check tbl8 allocation was successful. */
                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                /* Find index into tbl8 and range. */
                tbl8_index = (tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                                (ip_masked & 0xFF);

                /* Set tbl8 entry. */
                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                        lpm->tbl8[i].valid = VALID;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go
                 */

                struct rte_lpm_tbl_entry new_tbl24_entry = {
                        .group_idx = tbl8_group_index,
                        .valid = VALID,
                        .valid_group = 1,
                        .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } /* If valid entry but not extended calculate the index into Table8. */
        else if (lpm->tbl24[tbl24_index].valid_group == 0) {
                /* Search for free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_group_end = tbl8_group_start +
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                /* Populate new tbl8 with tbl24 value. */
                for (i = tbl8_group_start; i < tbl8_group_end; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
                        lpm->tbl8[i].next_hop =
                                        lpm->tbl24[tbl24_index].next_hop;
                }

                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                /* Insert new rule into the tbl8 entry. */
                for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go.
                 */

                struct rte_lpm_tbl_entry new_tbl24_entry = {
                                .group_idx = tbl8_group_index,
                                .valid = VALID,
                                .valid_group = 1,
                                .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } else { /*
                * If it is valid, extended entry calculate the index into tbl8.
                */
                tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

                        if (!lpm->tbl8[i].valid ||
                                        lpm->tbl8[i].depth <= depth) {
                                struct rte_lpm_tbl_entry new_tbl8_entry = {
                                        .valid = VALID,
                                        .depth = depth,
                                        .next_hop = next_hop,
                                        .valid_group = lpm->tbl8[i].valid_group,
                                };

                                /*
                                 * Setting tbl8 entry in one go to avoid race
                                 * condition
                                 */
                                lpm->tbl8[i] = new_tbl8_entry;

                                continue;
                        }
                }
        }
#undef group_idx
        return 0;
}
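
/*
 * Worked example for the big-depth path: adding 192.168.1.128/25 maps to
 * tbl24 index (192 << 16) | (168 << 8) | 1; if that entry is not yet
 * extended a fresh tbl8 group is allocated (pre-filled with the old tbl24
 * next hop when one existed) and depth_to_range(25) = 128 tbl8 entries
 * starting at offset 128 within the group are written.
 */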

/*
 * Add a route
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t next_hop)
{
        int32_t rule_index, status = 0;
        uint32_t ip_masked;

        /* Check user arguments. */
        if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        ip_masked = ip & depth_to_mask(depth);

        /* Add the rule to the rule table. */
        rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

        /* If there is no space available for the new rule, return an error. */
        if (rule_index < 0) {
                return rule_index;
        }

        if (depth <= MAX_DEPTH_TBL24) {
                status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
        } else { /* If depth > MAX_DEPTH_TBL24 */
                status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

                /*
                 * If add fails due to exhaustion of tbl8 extensions delete
                 * rule that was added to rule table.
                 */
                if (status < 0) {
                        rule_delete_v20(lpm, rule_index, depth);

                        return status;
                }
        }

        return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);

int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                uint32_t next_hop)
{
        int32_t rule_index, status = 0;
        uint32_t ip_masked;

        /* Check user arguments. */
        if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        ip_masked = ip & depth_to_mask(depth);

        /* Add the rule to the rule table. */
        rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

        /* If there is no space available for the new rule, return an error. */
        if (rule_index < 0) {
                return rule_index;
        }

        if (depth <= MAX_DEPTH_TBL24) {
                status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
        } else { /* If depth > MAX_DEPTH_TBL24 */
                status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

                /*
                 * If add fails due to exhaustion of tbl8 extensions delete
                 * rule that was added to rule table.
                 */
                if (status < 0) {
                        rule_delete_v1604(lpm, rule_index, depth);

                        return status;
                }
        }

        return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
                uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
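
/*
 * Usage sketch (illustrative only; IPv4 addresses written as raw
 * host-order hex to avoid depending on any address-construction macro):
 *
 *      uint32_t next_hop;
 *
 *      // route 10.0.0.0/8 -> next hop 5
 *      if (rte_lpm_add(lpm, 0x0A000000, 8, 5) < 0)
 *              ; // handle -EINVAL or -ENOSPC
 *
 *      // longest-prefix match for 10.1.1.1
 *      if (rte_lpm_lookup(lpm, 0x0A010101, &next_hop) == 0)
 *              ; // next_hop == 5
 */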

/*
 * Look for a rule in the high-level rules table.
 * Note: this checks the control-plane rule table only; it does not
 * perform a longest-prefix-match lookup on the tbl24/tbl8 structures.
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t *next_hop)
{
        uint32_t ip_masked;
        int32_t rule_index;

        /* Check user arguments. */
        if ((lpm == NULL) ||
                (next_hop == NULL) ||
                (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        /* Look for the rule using rule_find. */
        ip_masked = ip & depth_to_mask(depth);
        rule_index = rule_find_v20(lpm, ip_masked, depth);

        if (rule_index >= 0) {
                *next_hop = lpm->rules_tbl[rule_index].next_hop;
                return 1;
        }

        /* If rule is not found return 0. */
        return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);

int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                uint32_t *next_hop)
{
        uint32_t ip_masked;
        int32_t rule_index;

        /* Check user arguments. */
        if ((lpm == NULL) ||
                (next_hop == NULL) ||
                (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        /* Look for the rule using rule_find. */
        ip_masked = ip & depth_to_mask(depth);
        rule_index = rule_find_v1604(lpm, ip_masked, depth);

        if (rule_index >= 0) {
                *next_hop = lpm->rules_tbl[rule_index].next_hop;
                return 1;
        }

        /* If rule is not found return 0. */
        return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
                uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);

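/*
 * Finds the next-longest rule that is shallower than the given depth and
 * covers ip, i.e. the rule that should back-fill table entries when the
 * rule at the given depth is deleted. Returns its rule index, or -1 if
 * no such rule exists.
 */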
static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t *sub_rule_depth)
{
        int32_t rule_index;
        uint32_t ip_masked;
        uint8_t prev_depth;

        for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
                ip_masked = ip & depth_to_mask(prev_depth);

                rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

                if (rule_index >= 0) {
                        *sub_rule_depth = prev_depth;
                        return rule_index;
                }
        }

        return -1;
}

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                uint8_t *sub_rule_depth)
{
        int32_t rule_index;
        uint32_t ip_masked;
        uint8_t prev_depth;

        for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
                ip_masked = ip & depth_to_mask(prev_depth);

                rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);

                if (rule_index >= 0) {
                        *sub_rule_depth = prev_depth;
                        return rule_index;
                }
        }

        return -1;
}

static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
        uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
        uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

        /* Calculate the range and index into Table24. */
        tbl24_range = depth_to_range(depth);
        tbl24_index = (ip_masked >> 8);

        /*
         * First check sub_rule_index. A value of -1 indicates that no
         * replacement rule exists, while a value >= 0 is the index of the
         * replacement rule.
         */
        if (sub_rule_index < 0) {
                /*
                 * If no replacement rule exists then invalidate entries
                 * associated with this rule.
                 */
                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

                        if (lpm->tbl24[i].valid_group == 0 &&
                                        lpm->tbl24[i].depth <= depth) {
                                lpm->tbl24[i].valid = INVALID;
                        } else if (lpm->tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
                                 * to be a rule with depth >= 25 in the
                                 * associated TBL8 group.
                                 */

                                tbl8_group_index = lpm->tbl24[i].group_idx;
                                tbl8_index = tbl8_group_index *
                                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                                for (j = tbl8_index; j < (tbl8_index +
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                                        if (lpm->tbl8[j].depth <= depth)
                                                lpm->tbl8[j].valid = INVALID;
                                }
                        }
                }
        } else {
                /*
                 * If a replacement rule exists then modify entries
                 * associated with this rule.
                 */

                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                        {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
                        .valid = VALID,
                        .valid_group = 0,
                        .depth = sub_rule_depth,
                };

                struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
                        .valid = VALID,
                        .valid_group = VALID,
                        .depth = sub_rule_depth,
                };
                new_tbl8_entry.next_hop =
                                lpm->rules_tbl[sub_rule_index].next_hop;

                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

                        if (lpm->tbl24[i].valid_group == 0 &&
                                        lpm->tbl24[i].depth <= depth) {
                                lpm->tbl24[i] = new_tbl24_entry;
                        } else if (lpm->tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
                                 * to be a rule with depth >= 25 in the
                                 * associated TBL8 group.
                                 */

                                tbl8_group_index = lpm->tbl24[i].group_idx;
                                tbl8_index = tbl8_group_index *
                                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                                for (j = tbl8_index; j < (tbl8_index +
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                                        if (lpm->tbl8[j].depth <= depth)
                                                lpm->tbl8[j] = new_tbl8_entry;
                                }
                        }
                }
        }

        return 0;
}
1410
1411 static inline int32_t
1412 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1413         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1414 {
1415 #define group_idx next_hop
1416         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1417
1418         /* Calculate the range and index into Table24. */
1419         tbl24_range = depth_to_range(depth);
1420         tbl24_index = (ip_masked >> 8);
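        /*
         * Worked example (illustrative): for 10.0.0.0/8, ip_masked is
         * 0x0A000000, so tbl24_index = 0x0A0000 and tbl24_range =
         * 2^(24 - 8) = 65536 tbl24 entries may need updating.
         */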
1421
1422         /*
1423          * First check sub_rule_index. A value of -1 means there is no
1424          * replacement rule, while a non-negative value is a valid rule index.
1425          */
1426         if (sub_rule_index < 0) {
1427                 /*
1428                  * If no replacement rule exists then invalidate entries
1429                  * associated with this rule.
1430                  */
1431                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1432
1433                         if (lpm->tbl24[i].valid_group == 0 &&
1434                                         lpm->tbl24[i].depth <= depth) {
1435                                 lpm->tbl24[i].valid = INVALID;
1436                         } else if (lpm->tbl24[i].valid_group == 1) {
1437                                 /*
1438                                  * If TBL24 entry is extended, then there has
1439                                  * to be a rule with depth >= 25 in the
1440                                  * associated TBL8 group.
1441                                  */
1442
1443                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1444                                 tbl8_index = tbl8_group_index *
1445                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1446
1447                                 for (j = tbl8_index; j < (tbl8_index +
1448                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1449
1450                                         if (lpm->tbl8[j].depth <= depth)
1451                                                 lpm->tbl8[j].valid = INVALID;
1452                                 }
1453                         }
1454                 }
1455         } else {
1456                 /*
1457                  * If a replacement rule exists then modify entries
1458                  * associated with this rule.
1459                  */
1460
1461                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1462                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1463                         .valid = VALID,
1464                         .valid_group = 0,
1465                         .depth = sub_rule_depth,
1466                 };
1467
1468                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1469                         .valid = VALID,
1470                         .valid_group = VALID,
1471                         .depth = sub_rule_depth,
1472                         .next_hop =
1473                                 lpm->rules_tbl[sub_rule_index].next_hop,
1474                 };
1475
1476                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1477
1478                         if (lpm->tbl24[i].valid_group == 0 &&
1479                                         lpm->tbl24[i].depth <= depth) {
1480                                 lpm->tbl24[i] = new_tbl24_entry;
1481                         } else  if (lpm->tbl24[i].valid_group == 1) {
1482                                 /*
1483                                  * If TBL24 entry is extended, then there has
1484                                  * to be a rule with depth >= 25 in the
1485                                  * associated TBL8 group.
1486                                  */
1487
1488                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1489                                 tbl8_index = tbl8_group_index *
1490                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1491
1492                                 for (j = tbl8_index; j < (tbl8_index +
1493                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1494
1495                                         if (lpm->tbl8[j].depth <= depth)
1496                                                 lpm->tbl8[j] = new_tbl8_entry;
1497                                 }
1498                         }
1499                 }
1500         }
1501 #undef group_idx
1502         return 0;
1503 }
1504
1505 /*
1506  * Checks if a tbl8 group can be recycled.
1507  *
1508  * Return of -EEXIST means the tbl8 is in use and thus cannot be recycled.
1509  * Return of -EINVAL means the tbl8 is empty and thus can be recycled.
1510  * Return of a value >= 0 means the tbl8 is in use but every entry belongs to
1511  * the same depth <= 24 rule, so it can be collapsed into tbl24 and recycled.
1512  */
1513 static inline int32_t
1514 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1515                 uint32_t tbl8_group_start)
1516 {
1517         uint32_t tbl8_group_end, i;
1518         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1519
1520         /*
1521          * Check the first entry of the given tbl8. If it is invalid we know
1522          * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24,
1523          * as such a rule would cover every entry of the group, so the table
1524          * can only be recycled if it is completely empty.
1525          */
1526         if (tbl8[tbl8_group_start].valid) {
1527                 /*
1528                  * If the first entry is valid, check if its depth is no greater
1529                  * than 24 and, if so, check the rest of the entries to verify
1530                  * that they all have this same depth.
1531                  */
1532                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1533                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1534                                         i++) {
1535
1536                                 if (tbl8[i].depth !=
1537                                                 tbl8[tbl8_group_start].depth) {
1538
1539                                         return -EEXIST;
1540                                 }
1541                         }
1542                         /* If all entries are the same, return the tbl8 index. */
1543                         return tbl8_group_start;
1544                 }
1545
1546                 return -EEXIST;
1547         }
1548         /*
1549          * If the first entry is invalid check if the rest of the entries in
1550          * the tbl8 are invalid.
1551          */
1552         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1553                 if (tbl8[i].valid)
1554                         return -EEXIST;
1555         }
1556         /* If no valid entries are found then return -EINVAL. */
1557         return -EINVAL;
1558 }
1559
1560 static inline int32_t
1561 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1562                 uint32_t tbl8_group_start)
1563 {
1564         uint32_t tbl8_group_end, i;
1565         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1566
1567         /*
1568          * Check the first entry of the given tbl8. If it is invalid we know
1569          * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24,
1570          * as such a rule would cover every entry of the group, so the table
1571          * can only be recycled if it is completely empty.
1572          */
1573         if (tbl8[tbl8_group_start].valid) {
1574                 /*
1575                  * If the first entry is valid, check if its depth is no greater
1576                  * than 24 and, if so, check the rest of the entries to verify
1577                  * that they all have this same depth.
1578                  */
1579                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1580                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1581                                         i++) {
1582
1583                                 if (tbl8[i].depth !=
1584                                                 tbl8[tbl8_group_start].depth) {
1585
1586                                         return -EEXIST;
1587                                 }
1588                         }
1589                         /* If all entries are the same, return the tbl8 index. */
1590                         return tbl8_group_start;
1591                 }
1592
1593                 return -EEXIST;
1594         }
1595         /*
1596          * If the first entry is invalid check if the rest of the entries in
1597          * the tbl8 are invalid.
1598          */
1599         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1600                 if (tbl8[i].valid)
1601                         return -EEXIST;
1602         }
1603         /* If no valid entries are found then return -EINVAL. */
1604         return -EINVAL;
1605 }
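/*
 * Illustrative sketch (kept in a comment) of how a caller acts on the three
 * recycle-check outcomes; delete_depth_big() below does exactly this:
 *
 *      int32_t ret = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
 *
 *      if (ret == -EINVAL) {
 *              // group fully empty: invalidate the tbl24 entry, then free
 *      } else if (ret >= 0) {
 *              // one depth <= 24 rule covers the whole group: copy entry
 *              // lpm->tbl8[ret] back into tbl24, then free the group
 *      }
 *      // ret == -EEXIST: group still holds depth > 24 rules, keep it
 */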
1606
1607 static inline int32_t
1608 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1609         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1610 {
1611         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1612                         tbl8_range, i;
1613         int32_t tbl8_recycle_index;
1614
1615         /*
1616          * Calculate the index into tbl24 and range. Note: All depths larger
1617          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1618          */
1619         tbl24_index = ip_masked >> 8;
1620
1621         /* Calculate the index into tbl8 and range. */
1622         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1623         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1624         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1625         tbl8_range = depth_to_range(depth);
1626
1627         if (sub_rule_index < 0) {
1628                 /*
1629                  * Loop through the range of entries on tbl8 for which the
1630                  * rule_to_delete must be removed or modified.
1631                  */
1632                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1633                         if (lpm->tbl8[i].depth <= depth)
1634                                 lpm->tbl8[i].valid = INVALID;
1635                 }
1636         } else {
1637                 /* Set new tbl8 entry. */
1638                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1639                         .valid = VALID,
1640                         .depth = sub_rule_depth,
1641                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1642                 };
1643
1644                 new_tbl8_entry.next_hop =
1645                                 lpm->rules_tbl[sub_rule_index].next_hop;
1646                 /*
1647                  * Loop through the range of entries on tbl8 for which the
1648                  * rule_to_delete must be modified.
1649                  */
1650                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1651                         if (lpm->tbl8[i].depth <= depth)
1652                                 lpm->tbl8[i] = new_tbl8_entry;
1653                 }
1654         }
1655
1656         /*
1657          * Check if there are any valid entries in this tbl8 group. If all
1658          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1659          * associated tbl24 entry.
1660          */
1661
1662         tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1663
1664         if (tbl8_recycle_index == -EINVAL) {
1665                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1666                 lpm->tbl24[tbl24_index].valid = 0;
1667                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1668         } else if (tbl8_recycle_index > -1) {
1669                 /* Update tbl24 entry. */
1670                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1671                         { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
1672                         .valid = VALID,
1673                         .valid_group = 0,
1674                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1675                 };
1676
1677                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1678                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1679                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1680         }
1681
1682         return 0;
1683 }
1684
1685 static inline int32_t
1686 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1687         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1688 {
1689 #define group_idx next_hop
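        /*
         * As in delete_depth_small_v1604: next_hop doubles as the tbl8
         * group index when valid_group is set.
         */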
1690         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1691                         tbl8_range, i;
1692         int32_t tbl8_recycle_index;
1693
1694         /*
1695          * Calculate the index into tbl24 and range. Note: All depths larger
1696          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1697          */
1698         tbl24_index = ip_masked >> 8;
1699
1700         /* Calculate the index into tbl8 and range. */
1701         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1702         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1703         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1704         tbl8_range = depth_to_range(depth);
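        /*
         * Worked example (illustrative): deleting 10.0.0.64/26 gives
         * tbl8_range = 2^(32 - 26) = 64 and (ip_masked & 0xFF) = 64, so
         * entries 64..127 of the group are candidates for update.
         */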
1705
1706         if (sub_rule_index < 0) {
1707                 /*
1708                  * Loop through the range of entries on tbl8 for which the
1709                  * rule_to_delete must be removed or modified.
1710                  */
1711                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1712                         if (lpm->tbl8[i].depth <= depth)
1713                                 lpm->tbl8[i].valid = INVALID;
1714                 }
1715         } else {
1716                 /* Set new tbl8 entry. */
1717                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1718                         .valid = VALID,
1719                         .depth = sub_rule_depth,
1720                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1721                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1722                 };
1723
1724                 /*
1725                  * Loop through the range of entries on tbl8 for which the
1726                  * rule_to_delete must be modified.
1727                  */
1728                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1729                         if (lpm->tbl8[i].depth <= depth)
1730                                 lpm->tbl8[i] = new_tbl8_entry;
1731                 }
1732         }
1733
1734         /*
1735          * Check if there are any valid entries in this tbl8 group. If all
1736          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1737          * associated tbl24 entry.
1738          */
1739
1740         tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1741
1742         if (tbl8_recycle_index == -EINVAL) {
1743                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1744                 lpm->tbl24[tbl24_index].valid = 0;
1745                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1746         } else if (tbl8_recycle_index > -1) {
1747                 /* Update tbl24 entry. */
1748                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1749                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1750                         .valid = VALID,
1751                         .valid_group = 0,
1752                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1753                 };
1754
1755                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1756                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1757                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1758         }
1759 #undef group_idx
1760         return 0;
1761 }
1762
1763 /*
1764  * Deletes a rule
1765  */
1766 int
1767 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1768 {
1769         int32_t rule_to_delete_index, sub_rule_index;
1770         uint32_t ip_masked;
1771         uint8_t sub_rule_depth;
1772         /*
1773          * Check input arguments. Note: ip is an unsigned 32-bit integer, so
1774          * every possible value is valid and it need not be checked.
1775          */
1776         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1777                 return -EINVAL;
1778         }
1779
1780         ip_masked = ip & depth_to_mask(depth);
1781
1782         /*
1783          * Find the index of the input rule, that needs to be deleted, in the
1784          * rule table.
1785          */
1786         rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1787
1788         /*
1789          * Check that a rule was found. If no matching rule exists,
1790          * rule_find returns -EINVAL.
1791          */
1792         if (rule_to_delete_index < 0)
1793                 return -EINVAL;
1794
1795         /* Delete the rule from the rule table. */
1796         rule_delete_v20(lpm, rule_to_delete_index, depth);
1797
1798         /*
1799          * Find a rule to replace the rule_to_delete. If there is none,
1800          * find_previous_rule returns -1 and the table entries associated
1801          * with this rule are invalidated.
1802          */
1803         sub_rule_depth = 0;
1804         sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1805
1806         /*
1807          * If the input depth is no greater than MAX_DEPTH_TBL24 use
1808          * delete_depth_small, otherwise use delete_depth_big.
1809          */
1810         if (depth <= MAX_DEPTH_TBL24) {
1811                 return delete_depth_small_v20(lpm, ip_masked, depth,
1812                                 sub_rule_index, sub_rule_depth);
1813         } else { /* If depth > MAX_DEPTH_TBL24 */
1814                 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1815                                 sub_rule_depth);
1816         }
1817 }
1818 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1819
1820 int
1821 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1822 {
1823         int32_t rule_to_delete_index, sub_rule_index;
1824         uint32_t ip_masked;
1825         uint8_t sub_rule_depth;
1826         /*
1827          * Check input arguments. Note: ip is an unsigned 32-bit integer, so
1828          * every possible value is valid and it need not be checked.
1829          */
1830         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1831                 return -EINVAL;
1832         }
1833
1834         ip_masked = ip & depth_to_mask(depth);
1835
1836         /*
1837          * Find the index of the input rule, that needs to be deleted, in the
1838          * rule table.
1839          */
1840         rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1841
1842         /*
1843          * Check that a rule was found. If no matching rule exists,
1844          * rule_find returns -EINVAL.
1845          */
1846         if (rule_to_delete_index < 0)
1847                 return -EINVAL;
1848
1849         /* Delete the rule from the rule table. */
1850         rule_delete_v1604(lpm, rule_to_delete_index, depth);
1851
1852         /*
1853          * Find a rule to replace the rule_to_delete. If there is none,
1854          * find_previous_rule returns -1 and the table entries associated
1855          * with this rule are invalidated.
1856          */
1857         sub_rule_depth = 0;
1858         sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1859
1860         /*
1861          * If the input depth is no greater than MAX_DEPTH_TBL24 use
1862          * delete_depth_small, otherwise use delete_depth_big.
1863          */
1864         if (depth <= MAX_DEPTH_TBL24) {
1865                 return delete_depth_small_v1604(lpm, ip_masked, depth,
1866                                 sub_rule_index, sub_rule_depth);
1867         } else { /* If depth > MAX_DEPTH_TBL24 */
1868                 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1869                                 sub_rule_depth);
1870         }
1871 }
1872 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1873 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1874                 uint8_t depth), rte_lpm_delete_v1604);
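/*
 * Minimal usage sketch for rte_lpm_delete() (illustrative only, kept in a
 * comment; assumes "lpm" was created with rte_lpm_create() and uses the
 * v1604 uint32_t next-hop API):
 *
 *      uint32_t ip = (uint32_t)10 << 24;       // 10.0.0.0
 *      uint32_t next_hop;
 *
 *      rte_lpm_add(lpm, ip, 8, 100);           // 10.0.0.0/8  -> 100
 *      rte_lpm_add(lpm, ip, 16, 200);          // 10.0.0.0/16 -> 200
 *
 *      rte_lpm_delete(lpm, ip, 16);            // remove the /16 rule
 *      rte_lpm_lookup(lpm, ip, &next_hop);     // falls back to the /8,
 *                                              // next_hop is now 100
 *      rte_lpm_delete(lpm, ip, 16);            // no such rule: -EINVAL
 */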
1875
1876 /*
1877  * Delete all rules from the LPM table.
1878  */
1879 void
1880 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1881 {
1882         /* Zero rule information. */
1883         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1884
1885         /* Zero tbl24. */
1886         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1887
1888         /* Zero tbl8. */
1889         memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1890
1891         /* Delete all rules from the rules table. */
1892         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1893 }
1894 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1895
1896 void
1897 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1898 {
1899         /* Zero rule information. */
1900         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1901
1902         /* Zero tbl24. */
1903         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1904
1905         /* Zero tbl8. */
1906         memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1907                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1908
1909         /* Delete all rules from the rules table. */
1910         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1911 }
1912 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1913 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1914                 rte_lpm_delete_all_v1604);
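/*
 * Minimal usage sketch for rte_lpm_delete_all() (illustrative only; assumes
 * "lpm" is a valid table):
 *
 *      rte_lpm_delete_all(lpm);        // wipe rules, tbl24 and tbl8 groups
 *      // The table itself stays allocated: lookups simply miss until new
 *      // rules are installed with rte_lpm_add().
 */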