lpm: fix missing free
[dpdk.git] / lib / librte_lpm / rte_lpm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <string.h>
35 #include <stdint.h>
36 #include <errno.h>
37 #include <stdarg.h>
38 #include <stdio.h>
40 #include <sys/queue.h>
41
42 #include <rte_log.h>
43 #include <rte_branch_prediction.h>
44 #include <rte_common.h>
45 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
46 #include <rte_malloc.h>
47 #include <rte_memzone.h>
48 #include <rte_eal.h>
49 #include <rte_eal_memconfig.h>
50 #include <rte_per_lcore.h>
51 #include <rte_string_fns.h>
52 #include <rte_errno.h>
53 #include <rte_rwlock.h>
54 #include <rte_spinlock.h>
55
56 #include "rte_lpm.h"
57
58 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
59
60 static struct rte_tailq_elem rte_lpm_tailq = {
61         .name = "RTE_LPM",
62 };
63 EAL_REGISTER_TAILQ(rte_lpm_tailq)
64
65 #define MAX_DEPTH_TBL24 24
66
67 enum valid_flag {
68         INVALID = 0,
69         VALID
70 };
71
72 /* Macro to enable/disable run-time checks. */
73 #if defined(RTE_LIBRTE_LPM_DEBUG)
74 #include <rte_debug.h>
75 #define VERIFY_DEPTH(depth) do {                                \
76         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
77                 rte_panic("LPM: Invalid depth (%u) at line %d", \
78                                 (unsigned)(depth), __LINE__);   \
79 } while (0)
80 #else
81 #define VERIFY_DEPTH(depth)
82 #endif
83
84 /*
85  * Converts a given depth value to its corresponding mask value.
86  *
87  * depth  (IN)          : range = 1 - 32
88  * mask   (OUT)         : 32bit mask
89  */
90 static uint32_t __attribute__((pure))
91 depth_to_mask(uint8_t depth)
92 {
93         VERIFY_DEPTH(depth);
94
95         /* To calculate a mask, start with a 1 on the left hand side and right
96          * shift while populating the left hand side with 1's.
97          */
98         return (int)0x80000000 >> (depth - 1);
99 }
100
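/*
 * Worked values (illustrative): depth 8 -> 0xFF000000, depth 24 -> 0xFFFFFF00,
 * depth 32 -> 0xFFFFFFFF. The expression above relies on the compiler
 * performing an arithmetic (sign-extending) right shift of the negative value,
 * which GCC and clang do; the sign bits supply the leading 1's of the mask.
 */
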
101 /*
102  * Converts given depth value to its corresponding range value.
103  */
104 static inline uint32_t __attribute__((pure))
105 depth_to_range(uint8_t depth)
106 {
107         VERIFY_DEPTH(depth);
108
109         /*
110          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
111          */
112         if (depth <= MAX_DEPTH_TBL24)
113                 return 1 << (MAX_DEPTH_TBL24 - depth);
114
115         /* Else if depth is greater than 24 */
116         return 1 << (RTE_LPM_MAX_DEPTH - depth);
117 }
118
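/*
 * Worked values (illustrative): depth 16 covers 1 << (24 - 16) = 256 tbl24
 * entries, depth 24 covers exactly one entry, and depth 28 covers
 * 1 << (32 - 28) = 16 entries within a tbl8 group.
 */
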
119 /*
120  * Find an existing lpm table and return a pointer to it.
121  */
122 struct rte_lpm_v20 *
123 rte_lpm_find_existing_v20(const char *name)
124 {
125         struct rte_lpm_v20 *l = NULL;
126         struct rte_tailq_entry *te;
127         struct rte_lpm_list *lpm_list;
128
129         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
130
131         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
132         TAILQ_FOREACH(te, lpm_list, next) {
133                 l = (struct rte_lpm_v20 *) te->data;
134                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
135                         break;
136         }
137         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
138
139         if (te == NULL) {
140                 rte_errno = ENOENT;
141                 return NULL;
142         }
143
144         return l;
145 }
146 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
147
148 struct rte_lpm *
149 rte_lpm_find_existing_v1604(const char *name)
150 {
151         struct rte_lpm *l = NULL;
152         struct rte_tailq_entry *te;
153         struct rte_lpm_list *lpm_list;
154
155         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
156
157         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
158         TAILQ_FOREACH(te, lpm_list, next) {
159                 l = (struct rte_lpm *) te->data;
160                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
161                         break;
162         }
163         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
164
165         if (te == NULL) {
166                 rte_errno = ENOENT;
167                 return NULL;
168         }
169
170         return l;
171 }
172 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
173 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
174                 rte_lpm_find_existing_v1604);
175
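/*
 * Note on the _v20/_v1604 pairs throughout this file: VERSION_SYMBOL() keeps
 * the old DPDK 2.0 binary interface alive for already-built applications,
 * while BIND_DEFAULT_SYMBOL() and MAP_STATIC_SYMBOL() make the 16.04 variant
 * the default for newly linked code (shared and static builds respectively).
 * These macros are provided by the EAL compatibility header, rte_compat.h.
 */
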
176 /*
177  * Allocates memory for LPM object
178  */
179 struct rte_lpm_v20 *
180 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
181                 __rte_unused int flags)
182 {
183         char mem_name[RTE_LPM_NAMESIZE];
184         struct rte_lpm_v20 *lpm = NULL;
185         struct rte_tailq_entry *te;
186         uint32_t mem_size;
187         struct rte_lpm_list *lpm_list;
188
189         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
190
191         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
192
193         /* Check user arguments. */
194         if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
195                 rte_errno = EINVAL;
196                 return NULL;
197         }
198
199         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
200
201         /* Determine the amount of memory to allocate. */
202         mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
203
204         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
205
206         /* Guarantee there is no existing entry with the same name. */
207         TAILQ_FOREACH(te, lpm_list, next) {
208                 lpm = (struct rte_lpm_v20 *) te->data;
209                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
210                         break;
211         }
212         if (te != NULL)
213                 goto exit;
214
215         /* allocate tailq entry */
216         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
217         if (te == NULL) {
218                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
219                 goto exit;
220         }
221
222         /* Allocate memory to store the LPM data structures. */
223         lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
224                         RTE_CACHE_LINE_SIZE, socket_id);
225         if (lpm == NULL) {
226                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
227                 rte_free(te);
228                 goto exit;
229         }
230
231         /* Save user arguments. */
232         lpm->max_rules = max_rules;
233         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
234
235         te->data = (void *) lpm;
236
237         TAILQ_INSERT_TAIL(lpm_list, te, next);
238
239 exit:
240         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
241
242         return lpm;
243 }
244 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
245
246 struct rte_lpm *
247 rte_lpm_create_v1604(const char *name, int socket_id,
248                 const struct rte_lpm_config *config)
249 {
250         char mem_name[RTE_LPM_NAMESIZE];
251         struct rte_lpm *lpm = NULL;
252         struct rte_tailq_entry *te;
253         uint32_t mem_size, rules_size, tbl8s_size;
254         struct rte_lpm_list *lpm_list;
255
256         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
257
258         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
259
260         /* Check user arguments. */
261         if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
262                         || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
263                 rte_errno = EINVAL;
264                 return NULL;
265         }
266
267         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
268
269         /* Determine the amount of memory to allocate. */
270         mem_size = sizeof(*lpm);
271         rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
272         tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
273                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
274
275         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
276
277         /* Guarantee there is no existing entry with the same name. */
278         TAILQ_FOREACH(te, lpm_list, next) {
279                 lpm = (struct rte_lpm *) te->data;
280                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
281                         break;
282         }
283         if (te != NULL)
284                 goto exit;
285
286         /* allocate tailq entry */
287         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
288         if (te == NULL) {
289                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
290                 goto exit;
291         }
292
293         /* Allocate memory to store the LPM data structures. */
294         lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
295                         RTE_CACHE_LINE_SIZE, socket_id);
296         if (lpm == NULL) {
297                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
298                 rte_free(te);
299                 goto exit;
300         }
301
302         lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
303                         (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
304
305         if (lpm->rules_tbl == NULL) {
306                 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
307                 rte_free(lpm);
                    lpm = NULL;
308                 rte_free(te);
309                 goto exit;
310         }
311
312         lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
313                         (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
314
315         if (lpm->tbl8 == NULL) {
316                 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
                    rte_free(lpm->rules_tbl);
317                 rte_free(lpm);
                    lpm = NULL;
318                 rte_free(te);
319                 goto exit;
320         }
321
322         /* Save user arguments. */
323         lpm->max_rules = config->max_rules;
324         lpm->number_tbl8s = config->number_tbl8s;
325         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
326
327         te->data = (void *) lpm;
328
329         TAILQ_INSERT_TAIL(lpm_list, te, next);
330
331 exit:
332         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
333
334         return lpm;
335 }
336 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
337 MAP_STATIC_SYMBOL(
338         struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
339                         const struct rte_lpm_config *config), rte_lpm_create_v1604);
340
341 /*
342  * Deallocates memory for given LPM table.
343  */
344 void
345 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
346 {
347         struct rte_lpm_list *lpm_list;
348         struct rte_tailq_entry *te;
349
350         /* Check user arguments. */
351         if (lpm == NULL)
352                 return;
353
354         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
355
356         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
357
358         /* find our tailq entry */
359         TAILQ_FOREACH(te, lpm_list, next) {
360                 if (te->data == (void *) lpm)
361                         break;
362         }
363         if (te != NULL)
364                 TAILQ_REMOVE(lpm_list, te, next);
365
366         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
367
368         rte_free(lpm->rules_tbl);
369         rte_free(lpm);
370         rte_free(te);
371 }
372 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
373
374 void
375 rte_lpm_free_v1604(struct rte_lpm *lpm)
376 {
377         struct rte_lpm_list *lpm_list;
378         struct rte_tailq_entry *te;
379
380         /* Check user arguments. */
381         if (lpm == NULL)
382                 return;
383
384         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
385
386         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
387
388         /* find our tailq entry */
389         TAILQ_FOREACH(te, lpm_list, next) {
390                 if (te->data == (void *) lpm)
391                         break;
392         }
393         if (te != NULL)
394                 TAILQ_REMOVE(lpm_list, te, next);
395
396         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
397
398         rte_free(lpm->rules_tbl);
399         rte_free(lpm);
400         rte_free(te);
401 }
402 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
403 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
404                 rte_lpm_free_v1604);
405
406 /*
407  * Adds a rule to the rule table.
408  *
409  * NOTE: The rule table is split into 32 groups. Each group contains rules that
410  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
411  * prefixes with a depth of 1, etc.). In the following code (depth - 1) is
412  * used as the group index because, although the depth range is 1 - 32, the
413  * groups are indexed in the rule table from 0 - 31.
414  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
415  */
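/*
 * Layout sketch (illustrative): with rules only at depths 8 and 24, rules_tbl
 * holds the depth-8 group first, immediately followed by the depth-24 group:
 *
 *   rule_info[7]  = { .first_rule = 0,  .used_rules = N8  }
 *   rule_info[23] = { .first_rule = N8, .used_rules = N24 }
 *
 * Inserting into a group therefore shifts the first entry of every deeper,
 * non-empty group up by one slot (see the "Make room" loop below).
 */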
416 static inline int32_t
417 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
418         uint8_t next_hop)
419 {
420         uint32_t rule_gindex, rule_index, last_rule;
421         int i;
422
423         VERIFY_DEPTH(depth);
424
425         /* Scan through rule group to see if rule already exists. */
426         if (lpm->rule_info[depth - 1].used_rules > 0) {
427
428                 /* rule_gindex stands for rule group index. */
429                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
430                 /* Initialise rule_index to point to start of rule group. */
431                 rule_index = rule_gindex;
432                 /* Last rule = Last used rule in this rule group. */
433                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
434
435                 for (; rule_index < last_rule; rule_index++) {
436
437                         /* If rule already exists update its next_hop and return. */
438                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
439                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
440
441                                 return rule_index;
442                         }
443                 }
444
445                 if (rule_index == lpm->max_rules)
446                         return -ENOSPC;
447         } else {
448                 /* Calculate the position in which the rule will be stored. */
449                 rule_index = 0;
450
451                 for (i = depth - 1; i > 0; i--) {
452                         if (lpm->rule_info[i - 1].used_rules > 0) {
453                                 rule_index = lpm->rule_info[i - 1].first_rule
454                                                 + lpm->rule_info[i - 1].used_rules;
455                                 break;
456                         }
457                 }
458                 if (rule_index == lpm->max_rules)
459                         return -ENOSPC;
460
461                 lpm->rule_info[depth - 1].first_rule = rule_index;
462         }
463
464         /* Make room for the new rule in the array. */
465         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
466                 if (lpm->rule_info[i - 1].first_rule
467                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
468                         return -ENOSPC;
469
470                 if (lpm->rule_info[i - 1].used_rules > 0) {
471                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
472                                 + lpm->rule_info[i - 1].used_rules]
473                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
474                         lpm->rule_info[i - 1].first_rule++;
475                 }
476         }
477
478         /* Add the new rule. */
479         lpm->rules_tbl[rule_index].ip = ip_masked;
480         lpm->rules_tbl[rule_index].next_hop = next_hop;
481
482         /* Increment the used rules counter for this rule group. */
483         lpm->rule_info[depth - 1].used_rules++;
484
485         return rule_index;
486 }
487
488 static inline int32_t
489 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
490         uint32_t next_hop)
491 {
492         uint32_t rule_gindex, rule_index, last_rule;
493         int i;
494
495         VERIFY_DEPTH(depth);
496
497         /* Scan through rule group to see if rule already exists. */
498         if (lpm->rule_info[depth - 1].used_rules > 0) {
499
500                 /* rule_gindex stands for rule group index. */
501                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
502                 /* Initialise rule_index to point to start of rule group. */
503                 rule_index = rule_gindex;
504                 /* Last rule = Last used rule in this rule group. */
505                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
506
507                 for (; rule_index < last_rule; rule_index++) {
508
509                         /* If rule already exists update its next_hop and return. */
510                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
511                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
512
513                                 return rule_index;
514                         }
515                 }
516
517                 if (rule_index == lpm->max_rules)
518                         return -ENOSPC;
519         } else {
520                 /* Calculate the position in which the rule will be stored. */
521                 rule_index = 0;
522
523                 for (i = depth - 1; i > 0; i--) {
524                         if (lpm->rule_info[i - 1].used_rules > 0) {
525                                 rule_index = lpm->rule_info[i - 1].first_rule
526                                                 + lpm->rule_info[i - 1].used_rules;
527                                 break;
528                         }
529                 }
530                 if (rule_index == lpm->max_rules)
531                         return -ENOSPC;
532
533                 lpm->rule_info[depth - 1].first_rule = rule_index;
534         }
535
536         /* Make room for the new rule in the array. */
537         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
538                 if (lpm->rule_info[i - 1].first_rule
539                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
540                         return -ENOSPC;
541
542                 if (lpm->rule_info[i - 1].used_rules > 0) {
543                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
544                                 + lpm->rule_info[i - 1].used_rules]
545                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
546                         lpm->rule_info[i - 1].first_rule++;
547                 }
548         }
549
550         /* Add the new rule. */
551         lpm->rules_tbl[rule_index].ip = ip_masked;
552         lpm->rules_tbl[rule_index].next_hop = next_hop;
553
554         /* Increment the used rules counter for this rule group. */
555         lpm->rule_info[depth - 1].used_rules++;
556
557         return rule_index;
558 }
559
560 /*
561  * Delete a rule from the rule table.
562  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
563  */
564 static inline void
565 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
566 {
567         int i;
568
569         VERIFY_DEPTH(depth);
570
571         lpm->rules_tbl[rule_index] =
572                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
573                                 + lpm->rule_info[depth - 1].used_rules - 1];
574
575         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
576                 if (lpm->rule_info[i].used_rules > 0) {
577                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
578                                 lpm->rules_tbl[lpm->rule_info[i].first_rule
579                                         + lpm->rule_info[i].used_rules - 1];
580                         lpm->rule_info[i].first_rule--;
581                 }
582         }
583
584         lpm->rule_info[depth - 1].used_rules--;
585 }
586
587 static inline void
588 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
589 {
590         int i;
591
592         VERIFY_DEPTH(depth);
593
594         lpm->rules_tbl[rule_index] =
595                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
596                         + lpm->rule_info[depth - 1].used_rules - 1];
597
598         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
599                 if (lpm->rule_info[i].used_rules > 0) {
600                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
601                                         lpm->rules_tbl[lpm->rule_info[i].first_rule
602                                                 + lpm->rule_info[i].used_rules - 1];
603                         lpm->rule_info[i].first_rule--;
604                 }
605         }
606
607         lpm->rule_info[depth - 1].used_rules--;
608 }
609
610 /*
611  * Finds a rule in rule table.
612  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
613  */
614 static inline int32_t
615 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
616 {
617         uint32_t rule_gindex, last_rule, rule_index;
618
619         VERIFY_DEPTH(depth);
620
621         rule_gindex = lpm->rule_info[depth - 1].first_rule;
622         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
623
624         /* Scan used rules at given depth to find rule. */
625         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
626                 /* If rule is found return the rule index. */
627                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
628                         return rule_index;
629         }
630
631         /* If rule is not found return -EINVAL. */
632         return -EINVAL;
633 }
634
635 static inline int32_t
636 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
637 {
638         uint32_t rule_gindex, last_rule, rule_index;
639
640         VERIFY_DEPTH(depth);
641
642         rule_gindex = lpm->rule_info[depth - 1].first_rule;
643         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
644
645         /* Scan used rules at given depth to find rule. */
646         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
647                 /* If rule is found return the rule index. */
648                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
649                         return rule_index;
650         }
651
652         /* If rule is not found return -EINVAL. */
653         return -EINVAL;
654 }
655
656 /*
657  * Find, clean and allocate a tbl8.
658  */
659 static inline int32_t
660 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
661 {
662         uint32_t group_idx; /* tbl8 group index. */
663         struct rte_lpm_tbl_entry_v20 *tbl8_entry;
664
665         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
666         for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
667                         group_idx++) {
668                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
669                 /* If a free tbl8 group is found clean it and set as VALID. */
670                 if (!tbl8_entry->valid_group) {
671                         memset(&tbl8_entry[0], 0,
672                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
673                                         sizeof(tbl8_entry[0]));
674
675                         tbl8_entry->valid_group = VALID;
676
677                         /* Return group index for allocated tbl8 group. */
678                         return group_idx;
679                 }
680         }
681
682         /* If there are no tbl8 groups free then return error. */
683         return -ENOSPC;
684 }
685
686 static inline int32_t
687 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
688 {
689         uint32_t group_idx; /* tbl8 group index. */
690         struct rte_lpm_tbl_entry *tbl8_entry;
691
692         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
693         for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
694                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
695                 /* If a free tbl8 group is found clean it and set as VALID. */
696                 if (!tbl8_entry->valid_group) {
697                         memset(&tbl8_entry[0], 0,
698                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
699                                         sizeof(tbl8_entry[0]));
700
701                         tbl8_entry->valid_group = VALID;
702
703                         /* Return group index for allocated tbl8 group. */
704                         return group_idx;
705                 }
706         }
707
708         /* If there are no tbl8 groups free then return error. */
709         return -ENOSPC;
710 }
711
712 static inline void
713 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
714 {
715         /* Set tbl8 group invalid. */
716         tbl8[tbl8_group_start].valid_group = INVALID;
717 }
718
719 static inline void
720 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
721 {
722         /* Set tbl8 group invalid. */
723         tbl8[tbl8_group_start].valid_group = INVALID;
724 }
725
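/*
 * Freeing a group only clears the first entry's valid_group flag:
 * tbl8_alloc_*() above keys off that flag when searching for a free group and
 * zeroes the whole group before handing it out again.
 */
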
726 static inline int32_t
727 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
728                 uint8_t next_hop)
729 {
730         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
731
732         /* Calculate the index into Table24. */
733         tbl24_index = ip >> 8;
734         tbl24_range = depth_to_range(depth);
735
736         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
737                 /*
738                  * For entries that are invalid, or valid but not extended,
739                  * set the tbl24 entry directly.
740                  */
741                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
742                                 lpm->tbl24[i].depth <= depth)) {
743
744                         struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
745                                 { .next_hop = next_hop, },
746                                 .valid = VALID,
747                                 .valid_group = 0,
748                                 .depth = depth,
749                         };
750
751                         /* Setting tbl24 entry in one go to avoid race
752                          * conditions
753                          */
754                         lpm->tbl24[i] = new_tbl24_entry;
755
756                         continue;
757                 }
758
759                 if (lpm->tbl24[i].valid_group == 1) {
760                         /* If tbl24 entry is valid and extended, calculate the
761                          * index into tbl8.
762                          */
763                         tbl8_index = lpm->tbl24[i].group_idx *
764                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
765                         tbl8_group_end = tbl8_index +
766                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
767
768                         for (j = tbl8_index; j < tbl8_group_end; j++) {
769                                 if (!lpm->tbl8[j].valid ||
770                                                 lpm->tbl8[j].depth <= depth) {
771                                         struct rte_lpm_tbl_entry_v20
772                                                 new_tbl8_entry = {
773                                                 .valid = VALID,
774                                                 .valid_group = VALID,
775                                                 .depth = depth,
776                                                 .next_hop = next_hop,
777                                         };
778
779                                         /*
780                                          * Setting tbl8 entry in one go to avoid
781                                          * race conditions
782                                          */
783                                         lpm->tbl8[j] = new_tbl8_entry;
784
785                                         continue;
786                                 }
787                         }
788                 }
789         }
790
791         return 0;
792 }
793
794 static inline int32_t
795 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
796                 uint32_t next_hop)
797 {
798 #define group_idx next_hop
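        /*
         * group_idx is an alias for next_hop: in an extended tbl24 entry
         * (valid_group == 1) the next_hop field (24 bits wide in the 16.04
         * entry layout) stores the tbl8 group index rather than a next hop.
         * The #define, repeated in the other *_v1604 helpers, lets the
         * initializers below write .group_idx while actually setting that
         * field; it is #undef'd at the end of the function.
         */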
799         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
800
801         /* Calculate the index into Table24. */
802         tbl24_index = ip >> 8;
803         tbl24_range = depth_to_range(depth);
804
805         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
806                 /*
807                  * For entries that are invalid, or valid but not extended,
808                  * set the tbl24 entry directly.
809                  */
810                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
811                                 lpm->tbl24[i].depth <= depth)) {
812
813                         struct rte_lpm_tbl_entry new_tbl24_entry = {
814                                 .next_hop = next_hop,
815                                 .valid = VALID,
816                                 .valid_group = 0,
817                                 .depth = depth,
818                         };
819
820                         /* Setting tbl24 entry in one go to avoid race
821                          * conditions
822                          */
823                         lpm->tbl24[i] = new_tbl24_entry;
824
825                         continue;
826                 }
827
828                 if (lpm->tbl24[i].valid_group == 1) {
829                         /* If tbl24 entry is valid and extended, calculate the
830                          * index into tbl8.
831                          */
832                         tbl8_index = lpm->tbl24[i].group_idx *
833                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
834                         tbl8_group_end = tbl8_index +
835                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
836
837                         for (j = tbl8_index; j < tbl8_group_end; j++) {
838                                 if (!lpm->tbl8[j].valid ||
839                                                 lpm->tbl8[j].depth <= depth) {
840                                         struct rte_lpm_tbl_entry
841                                                 new_tbl8_entry = {
842                                                 .valid = VALID,
843                                                 .valid_group = VALID,
844                                                 .depth = depth,
845                                                 .next_hop = next_hop,
846                                         };
847
848                                         /*
849                                          * Setting tbl8 entry in one go to avoid
850                                          * race conditions
851                                          */
852                                         lpm->tbl8[j] = new_tbl8_entry;
853
854                                         continue;
855                                 }
856                         }
857                 }
858         }
859 #undef group_idx
860         return 0;
861 }
862
863 static inline int32_t
864 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
865                 uint8_t next_hop)
866 {
867         uint32_t tbl24_index;
868         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
869                 tbl8_range, i;
870
871         tbl24_index = (ip_masked >> 8);
872         tbl8_range = depth_to_range(depth);
873
874         if (!lpm->tbl24[tbl24_index].valid) {
875                 /* Search for a free tbl8 group. */
876                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
877
878                 /* Check tbl8 allocation was successful. */
879                 if (tbl8_group_index < 0) {
880                         return tbl8_group_index;
881                 }
882
883                 /* Find index into tbl8 and range. */
884                 tbl8_index = (tbl8_group_index *
885                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
886                                 (ip_masked & 0xFF);
887
888                 /* Set tbl8 entry. */
889                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
890                         lpm->tbl8[i].depth = depth;
891                         lpm->tbl8[i].next_hop = next_hop;
892                         lpm->tbl8[i].valid = VALID;
893                 }
894
895                 /*
896                  * Update tbl24 entry to point to new tbl8 entry. Note: The
897                  * valid_group flag and group_idx need to be updated simultaneously,
898                  * so assign whole structure in one go
899                  */
900
901                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
902                         { .group_idx = (uint8_t)tbl8_group_index, },
903                         .valid = VALID,
904                         .valid_group = 1,
905                         .depth = 0,
906                 };
907
908                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
909
910         } /* If the entry is valid but not extended, calculate the index into Table8. */
911         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
912                 /* Search for free tbl8 group. */
913                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
914
915                 if (tbl8_group_index < 0) {
916                         return tbl8_group_index;
917                 }
918
919                 tbl8_group_start = tbl8_group_index *
920                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
921                 tbl8_group_end = tbl8_group_start +
922                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
923
924                 /* Populate new tbl8 with tbl24 value. */
925                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
926                         lpm->tbl8[i].valid = VALID;
927                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
928                         lpm->tbl8[i].next_hop =
929                                         lpm->tbl24[tbl24_index].next_hop;
930                 }
931
932                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
933
934                 /* Insert new rule into the tbl8 entry. */
935                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
936                         if (!lpm->tbl8[i].valid ||
937                                         lpm->tbl8[i].depth <= depth) {
938                                 lpm->tbl8[i].valid = VALID;
939                                 lpm->tbl8[i].depth = depth;
940                                 lpm->tbl8[i].next_hop = next_hop;
941
942                                 continue;
943                         }
944                 }
945
946                 /*
947                  * Update tbl24 entry to point to new tbl8 entry. Note: The
948                  * valid_group flag and group_idx need to be updated simultaneously,
949                  * so assign whole structure in one go.
950                  */
951
952                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
953                                 { .group_idx = (uint8_t)tbl8_group_index, },
954                                 .valid = VALID,
955                                 .valid_group = 1,
956                                 .depth = 0,
957                 };
958
959                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
960
961         } else { /*
962                 * If the entry is valid and extended, calculate the index into tbl8.
963                 */
964                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
965                 tbl8_group_start = tbl8_group_index *
966                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
967                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
968
969                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
970
971                         if (!lpm->tbl8[i].valid ||
972                                         lpm->tbl8[i].depth <= depth) {
973                                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
974                                         .valid = VALID,
975                                         .depth = depth,
976                                         .next_hop = next_hop,
977                                         .valid_group = lpm->tbl8[i].valid_group,
978                                 };
979
980                                 /*
981                                  * Setting tbl8 entry in one go to avoid race
982                                  * condition
983                                  */
984                                 lpm->tbl8[i] = new_tbl8_entry;
985
986                                 continue;
987                         }
988                 }
989         }
990
991         return 0;
992 }
993
994 static inline int32_t
995 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
996                 uint32_t next_hop)
997 {
998 #define group_idx next_hop
999         uint32_t tbl24_index;
1000         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
1001                 tbl8_range, i;
1002
1003         tbl24_index = (ip_masked >> 8);
1004         tbl8_range = depth_to_range(depth);
1005
1006         if (!lpm->tbl24[tbl24_index].valid) {
1007                 /* Search for a free tbl8 group. */
1008                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1009
1010                 /* Check tbl8 allocation was successful. */
1011                 if (tbl8_group_index < 0) {
1012                         return tbl8_group_index;
1013                 }
1014
1015                 /* Find index into tbl8 and range. */
1016                 tbl8_index = (tbl8_group_index *
1017                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1018                                 (ip_masked & 0xFF);
1019
1020                 /* Set tbl8 entry. */
1021                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1022                         lpm->tbl8[i].depth = depth;
1023                         lpm->tbl8[i].next_hop = next_hop;
1024                         lpm->tbl8[i].valid = VALID;
1025                 }
1026
1027                 /*
1028                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1029                  * valid_group flag and group_idx need to be updated simultaneously,
1030                  * so assign whole structure in one go
1031                  */
1032
1033                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1034                         .group_idx = (uint8_t)tbl8_group_index,
1035                         .valid = VALID,
1036                         .valid_group = 1,
1037                         .depth = 0,
1038                 };
1039
1040                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1041
1042         } /* If the entry is valid but not extended, calculate the index into Table8. */
1043         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1044                 /* Search for free tbl8 group. */
1045                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1046
1047                 if (tbl8_group_index < 0) {
1048                         return tbl8_group_index;
1049                 }
1050
1051                 tbl8_group_start = tbl8_group_index *
1052                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1053                 tbl8_group_end = tbl8_group_start +
1054                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1055
1056                 /* Populate new tbl8 with tbl24 value. */
1057                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1058                         lpm->tbl8[i].valid = VALID;
1059                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
1060                         lpm->tbl8[i].next_hop =
1061                                         lpm->tbl24[tbl24_index].next_hop;
1062                 }
1063
1064                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1065
1066                 /* Insert new rule into the tbl8 entry. */
1067                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1068                         if (!lpm->tbl8[i].valid ||
1069                                         lpm->tbl8[i].depth <= depth) {
1070                                 lpm->tbl8[i].valid = VALID;
1071                                 lpm->tbl8[i].depth = depth;
1072                                 lpm->tbl8[i].next_hop = next_hop;
1073
1074                                 continue;
1075                         }
1076                 }
1077
1078                 /*
1079                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1080                  * valid_group flag and group_idx need to be updated simultaneously,
1081                  * so assign whole structure in one go.
1082                  */
1083
1084                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1085                                 .group_idx = (uint8_t)tbl8_group_index,
1086                                 .valid = VALID,
1087                                 .valid_group = 1,
1088                                 .depth = 0,
1089                 };
1090
1091                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1092
1093         } else { /*
1094                 * If the entry is valid and extended, calculate the index into tbl8.
1095                 */
1096                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1097                 tbl8_group_start = tbl8_group_index *
1098                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1099                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1100
1101                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1102
1103                         if (!lpm->tbl8[i].valid ||
1104                                         lpm->tbl8[i].depth <= depth) {
1105                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1106                                         .valid = VALID,
1107                                         .depth = depth,
1108                                         .next_hop = next_hop,
1109                                         .valid_group = lpm->tbl8[i].valid_group,
1110                                 };
1111
1112                                 /*
1113                                  * Setting tbl8 entry in one go to avoid race
1114                                  * condition
1115                                  */
1116                                 lpm->tbl8[i] = new_tbl8_entry;
1117
1118                                 continue;
1119                         }
1120                 }
1121         }
1122 #undef group_idx
1123         return 0;
1124 }
1125
1126 /*
1127  * Add a route
1128  */
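/*
 * Both versions below follow the same two-step scheme: the rule is first
 * recorded in the rules table (rule_add_*), then the forwarding tables are
 * programmed: tbl24 directly for depths <= 24 (add_depth_small_*), or via a
 * tbl8 group for depths of 25 and above (add_depth_big_*). If programming a
 * tbl8 fails for lack of free groups, the freshly added rule is rolled back
 * with rule_delete_*().
 */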
1129 int
1130 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1131                 uint8_t next_hop)
1132 {
1133         int32_t rule_index, status = 0;
1134         uint32_t ip_masked;
1135
1136         /* Check user arguments. */
1137         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1138                 return -EINVAL;
1139
1140         ip_masked = ip & depth_to_mask(depth);
1141
1142         /* Add the rule to the rule table. */
1143         rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
1144
1145         /* If there is no space available for the new rule, return an error. */
1146         if (rule_index < 0) {
1147                 return rule_index;
1148         }
1149
1150         if (depth <= MAX_DEPTH_TBL24) {
1151                 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
1152         } else { /* If depth > MAX_DEPTH_TBL24 */
1153                 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
1154
1155                 /*
1156                  * If add fails due to exhaustion of tbl8 extensions, delete
1157                  * the rule that was added to the rule table.
1158                  */
1159                 if (status < 0) {
1160                         rule_delete_v20(lpm, rule_index, depth);
1161
1162                         return status;
1163                 }
1164         }
1165
1166         return 0;
1167 }
1168 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1169
1170 int
1171 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1172                 uint32_t next_hop)
1173 {
1174         int32_t rule_index, status = 0;
1175         uint32_t ip_masked;
1176
1177         /* Check user arguments. */
1178         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1179                 return -EINVAL;
1180
1181         ip_masked = ip & depth_to_mask(depth);
1182
1183         /* Add the rule to the rule table. */
1184         rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
1185
1186         /* If there is no space available for the new rule, return an error. */
1187         if (rule_index < 0) {
1188                 return rule_index;
1189         }
1190
1191         if (depth <= MAX_DEPTH_TBL24) {
1192                 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
1193         } else { /* If depth > MAX_DEPTH_TBL24 */
1194                 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
1195
1196                 /*
1197                  * If add fails due to exhaustion of tbl8 extensions, delete
1198                  * the rule that was added to the rule table.
1199                  */
1200                 if (status < 0) {
1201                         rule_delete_v1604(lpm, rule_index, depth);
1202
1203                         return status;
1204                 }
1205         }
1206
1207         return 0;
1208 }
1209 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1210 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1211                 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
1212
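/*
 * Usage sketch (illustrative only, not part of the library): creating a table,
 * adding a /24 route and looking up an address with the 16.04 API could look
 * like the following. The name "example" and the sizes are arbitrary.
 *
 *	struct rte_lpm_config cfg = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example", SOCKET_ID_ANY, &cfg);
 *	uint32_t ip = ((uint32_t)192 << 24) | (168 << 16) | (1 << 8) | 0;
 *	uint32_t next_hop;
 *
 *	if (lpm != NULL && rte_lpm_add(lpm, ip, 24, 5) == 0 &&
 *			rte_lpm_lookup(lpm, ip | 42, &next_hop) == 0)
 *		printf("next hop %u\n", next_hop);	// prints 5
 *	rte_lpm_free(lpm);
 */
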
1213 /*
1214  * Look for a rule in the high-level rules table
1215  */
1216 int
1217 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1218 uint8_t *next_hop)
1219 {
1220         uint32_t ip_masked;
1221         int32_t rule_index;
1222
1223         /* Check user arguments. */
1224         if ((lpm == NULL) ||
1225                 (next_hop == NULL) ||
1226                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1227                 return -EINVAL;
1228
1229         /* Look for the rule using rule_find. */
1230         ip_masked = ip & depth_to_mask(depth);
1231         rule_index = rule_find_v20(lpm, ip_masked, depth);
1232
1233         if (rule_index >= 0) {
1234                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1235                 return 1;
1236         }
1237
1238         /* If rule is not found return 0. */
1239         return 0;
1240 }
1241 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1242
1243 int
1244 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1245 uint32_t *next_hop)
1246 {
1247         uint32_t ip_masked;
1248         int32_t rule_index;
1249
1250         /* Check user arguments. */
1251         if ((lpm == NULL) ||
1252                 (next_hop == NULL) ||
1253                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1254                 return -EINVAL;
1255
1256         /* Look for the rule using rule_find. */
1257         ip_masked = ip & depth_to_mask(depth);
1258         rule_index = rule_find_v1604(lpm, ip_masked, depth);
1259
1260         if (rule_index >= 0) {
1261                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1262                 return 1;
1263         }
1264
1265         /* If rule is not found return 0. */
1266         return 0;
1267 }
1268 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1269 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1270                 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
1271
1272 static inline int32_t
1273 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1274                 uint8_t *sub_rule_depth)
1275 {
1276         int32_t rule_index;
1277         uint32_t ip_masked;
1278         uint8_t prev_depth;
1279
1280         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1281                 ip_masked = ip & depth_to_mask(prev_depth);
1282
1283                 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1284
1285                 if (rule_index >= 0) {
1286                         *sub_rule_depth = prev_depth;
1287                         return rule_index;
1288                 }
1289         }
1290
1291         return -1;
1292 }
1293
1294 static inline int32_t
1295 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1296                 uint8_t *sub_rule_depth)
1297 {
1298         int32_t rule_index;
1299         uint32_t ip_masked;
1300         uint8_t prev_depth;
1301
1302         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1303                 ip_masked = ip & depth_to_mask(prev_depth);
1304
1305                 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1306
1307                 if (rule_index >= 0) {
1308                         *sub_rule_depth = prev_depth;
1309                         return rule_index;
1310                 }
1311         }
1312
1313         return -1;
1314 }
1315
1316 static inline int32_t
1317 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1318         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1319 {
1320         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1321
1322         /* Calculate the range and index into Table24. */
1323         tbl24_range = depth_to_range(depth);
1324         tbl24_index = (ip_masked >> 8);
1325
1326         /*
1327          * First check sub_rule_index: -1 means no replacement rule exists,
1328          * while a non-negative value is the index of the replacement rule.
1329          */
1330         if (sub_rule_index < 0) {
1331                 /*
1332                  * If no replacement rule exists then invalidate entries
1333                  * associated with this rule.
1334                  */
1335                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1336
1337                         if (lpm->tbl24[i].valid_group == 0 &&
1338                                         lpm->tbl24[i].depth <= depth) {
1339                                 lpm->tbl24[i].valid = INVALID;
1340                         } else if (lpm->tbl24[i].valid_group == 1) {
1341                                 /*
1342                                  * If TBL24 entry is extended, then there has
1343                                  * to be a rule with depth >= 25 in the
1344                                  * associated TBL8 group.
1345                                  */
1346
1347                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1348                                 tbl8_index = tbl8_group_index *
1349                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1350
1351                                 for (j = tbl8_index; j < (tbl8_index +
1352                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1353
1354                                         if (lpm->tbl8[j].depth <= depth)
1355                                                 lpm->tbl8[j].valid = INVALID;
1356                                 }
1357                         }
1358                 }
1359         } else {
1360                 /*
1361                  * If a replacement rule exists then modify entries
1362                  * associated with this rule.
1363                  */
1364
1365                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1366                         {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
1367                         .valid = VALID,
1368                         .valid_group = 0,
1369                         .depth = sub_rule_depth,
1370                 };
1371
1372                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1373                         .valid = VALID,
1374                         .valid_group = VALID,
1375                         .depth = sub_rule_depth,
1376                         .next_hop = lpm->rules_tbl
1377                         [sub_rule_index].next_hop,
1378                 };
1379
1380                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1381
1382                         if (lpm->tbl24[i].valid_group == 0 &&
1383                                         lpm->tbl24[i].depth <= depth) {
1384                                 lpm->tbl24[i] = new_tbl24_entry;
1385                         } else  if (lpm->tbl24[i].valid_group == 1) {
1386                                 /*
1387                                  * If TBL24 entry is extended, then there has
1388                                  * to be a rule with depth >= 25 in the
1389                                  * associated TBL8 group.
1390                                  */
1391
1392                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1393                                 tbl8_index = tbl8_group_index *
1394                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1395
1396                                 for (j = tbl8_index; j < (tbl8_index +
1397                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1398
1399                                         if (lpm->tbl8[j].depth <= depth)
1400                                                 lpm->tbl8[j] = new_tbl8_entry;
1401                                 }
1402                         }
1403                 }
1404         }
1405
1406         return 0;
1407 }
1408
1409 static inline int32_t
1410 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1411         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1412 {
1413 #define group_idx next_hop
1414         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1415
1416         /* Calculate the range and index into Table24. */
1417         tbl24_range = depth_to_range(depth);
1418         tbl24_index = (ip_masked >> 8);
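	/*
	 * Illustrative example (values chosen purely for illustration):
	 * deleting 10.1.1.0/24 gives ip_masked = 0x0a010100, so
	 * tbl24_index = 0x0a0101 and tbl24_range = depth_to_range(24) = 1;
	 * a /23 would instead span tbl24_range = 2 consecutive tbl24 entries.
	 */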
1419
1420         /*
1421          * First check sub_rule_index: a negative value means no replacement
1422          * rule exists; otherwise it is the index of the replacement rule.
1423          */
1424         if (sub_rule_index < 0) {
1425                 /*
1426                  * If no replacement rule exists then invalidate entries
1427                  * associated with this rule.
1428                  */
1429                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1430
1431                         if (lpm->tbl24[i].valid_group == 0 &&
1432                                         lpm->tbl24[i].depth <= depth) {
1433                                 lpm->tbl24[i].valid = INVALID;
1434                         } else if (lpm->tbl24[i].valid_group == 1) {
1435                                 /*
1436                                  * If TBL24 entry is extended, then there has
1437                                  * to be a rule with depth >= 25 in the
1438                                  * associated TBL8 group.
1439                                  */
1440
1441                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1442                                 tbl8_index = tbl8_group_index *
1443                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1444
1445                                 for (j = tbl8_index; j < (tbl8_index +
1446                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1447
1448                                         if (lpm->tbl8[j].depth <= depth)
1449                                                 lpm->tbl8[j].valid = INVALID;
1450                                 }
1451                         }
1452                 }
1453         } else {
1454                 /*
1455                  * If a replacement rule exists then modify entries
1456                  * associated with this rule.
1457                  */
1458
1459                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1460                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1461                         .valid = VALID,
1462                         .valid_group = 0,
1463                         .depth = sub_rule_depth,
1464                 };
1465
1466                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1467                         .valid = VALID,
1468                         .valid_group = VALID,
1469                         .depth = sub_rule_depth,
1470                         .next_hop = lpm->rules_tbl
1471                         [sub_rule_index].next_hop,
1472                 };
1473
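		/*
		 * Only entries whose recorded depth is <= the depth being
		 * deleted are rewritten below; entries installed by longer
		 * (more specific) prefixes keep their own next hop.
		 */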
1474                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1475
1476                         if (lpm->tbl24[i].valid_group == 0 &&
1477                                         lpm->tbl24[i].depth <= depth) {
1478                                 lpm->tbl24[i] = new_tbl24_entry;
1479                         } else  if (lpm->tbl24[i].valid_group == 1) {
1480                                 /*
1481                                  * If TBL24 entry is extended, then there has
1482                                  * to be a rule with depth >= 25 in the
1483                                  * associated TBL8 group.
1484                                  */
1485
1486                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1487                                 tbl8_index = tbl8_group_index *
1488                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1489
1490                                 for (j = tbl8_index; j < (tbl8_index +
1491                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1492
1493                                         if (lpm->tbl8[j].depth <= depth)
1494                                                 lpm->tbl8[j] = new_tbl8_entry;
1495                                 }
1496                         }
1497                 }
1498         }
1499 #undef group_idx
1500         return 0;
1501 }
1502
1503 /*
1504  * Checks if a tbl8 group can be recycled.
1505  *
1506  * Return of -EEXIST means the tbl8 is in use and can not be recycled.
1507  * Return of -EINVAL means the tbl8 is empty and can be recycled (freed).
1508  * Return of a value > -1 means the tbl8 is in use but all of its entries
1509  * hold the same rule, so it can be collapsed into a single tbl24 entry.
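 *
 * For example (illustrative): if a single /22 rule is all that remains
 * covering an extended tbl24 entry, every entry of its tbl8 group carries
 * depth 22, and the group can be folded back into one tbl24 entry.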
1510  */
1511 static inline int32_t
1512 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1513                 uint32_t tbl8_group_start)
1514 {
1515         uint32_t tbl8_group_end, i;
1516         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1517
1518         /*
1519          * Check the first entry of the given tbl8. If it is invalid we know
1520          * this tbl8 does not contain a rule with depth <= MAX_DEPTH_TBL24
1521          * (such a rule would cover every entry in the tbl8) and thus the
1522          * group cannot be collapsed into a single tbl24 entry.
1523          */
1524         if (tbl8[tbl8_group_start].valid) {
1525                 /*
1526                  * If the first entry is valid, check whether its depth is
1527                  * less than 24 and, if so, verify that the remaining entries
1528                  * in the group all share that depth.
1529                  */
1530                 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1531                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1532                                         i++) {
1533
1534                                 if (tbl8[i].depth !=
1535                                                 tbl8[tbl8_group_start].depth) {
1536
1537                                         return -EEXIST;
1538                                 }
1539                         }
1540                         /* If all entries are the same return the tbl8 index */
1541                         return tbl8_group_start;
1542                 }
1543
1544                 return -EEXIST;
1545         }
1546         /*
1547          * If the first entry is invalid check if the rest of the entries in
1548          * the tbl8 are invalid.
1549          */
1550         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1551                 if (tbl8[i].valid)
1552                         return -EEXIST;
1553         }
1554         /* If no valid entries are found then return -EINVAL. */
1555         return -EINVAL;
1556 }
1557
1558 static inline int32_t
1559 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1560                 uint32_t tbl8_group_start)
1561 {
1562         uint32_t tbl8_group_end, i;
1563         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1564
1565         /*
1566          * Check the first entry of the given tbl8. If it is invalid we know
1567          * this tbl8 does not contain a rule with depth <= MAX_DEPTH_TBL24
1568          * (such a rule would cover every entry in the tbl8) and thus the
1569          * group cannot be collapsed into a single tbl24 entry.
1570          */
1571         if (tbl8[tbl8_group_start].valid) {
1572                 /*
1573                  * If the first entry is valid, check whether its depth is
1574                  * less than 24 and, if so, verify that the remaining entries
1575                  * in the group all share that depth.
1576                  */
1577                 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1578                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1579                                         i++) {
1580
1581                                 if (tbl8[i].depth !=
1582                                                 tbl8[tbl8_group_start].depth) {
1583
1584                                         return -EEXIST;
1585                                 }
1586                         }
1587                         /* If all entries are the same return the tbl8 index */
1588                         return tbl8_group_start;
1589                 }
1590
1591                 return -EEXIST;
1592         }
1593         /*
1594          * If the first entry is invalid check if the rest of the entries in
1595          * the tbl8 are invalid.
1596          */
1597         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1598                 if (tbl8[i].valid)
1599                         return -EEXIST;
1600         }
1601         /* If no valid entries are found then return -EINVAL. */
1602         return -EINVAL;
1603 }
1604
1605 static inline int32_t
1606 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1607         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1608 {
1609         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1610                         tbl8_range, i;
1611         int32_t tbl8_recycle_index;
1612
1613         /*
1614          * Calculate the index into tbl24 and range. Note: All depths larger
1615          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1616          */
1617         tbl24_index = ip_masked >> 8;
1618
1619         /* Calculate the index into tbl8 and range. */
1620         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1621         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1622         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1623         tbl8_range = depth_to_range(depth);
1624
1625         if (sub_rule_index < 0) {
1626                 /*
1627                  * Loop through the range of entries on tbl8 for which the
1628                  * rule_to_delete must be removed or modified.
1629                  */
1630                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1631                         if (lpm->tbl8[i].depth <= depth)
1632                                 lpm->tbl8[i].valid = INVALID;
1633                 }
1634         } else {
1635                 /* Set new tbl8 entry. */
1636                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1637                         .valid = VALID,
1638                         .depth = sub_rule_depth,
1639                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1640                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1641                 };
1642
1643                 /*
1644                  * Loop through the range of entries on tbl8 for which the
1645                  * rule_to_delete must be modified.
1646                  */
1647                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1648                         if (lpm->tbl8[i].depth <= depth)
1649                                 lpm->tbl8[i] = new_tbl8_entry;
1650                 }
1651         }
1652
1653         /*
1654          * Check if there are any valid entries in this tbl8 group. If all
1655          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1656          * associated tbl24 entry.
1657          */
1658
1659         tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1660
1661         if (tbl8_recycle_index == -EINVAL) {
1662                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1663                 lpm->tbl24[tbl24_index].valid = 0;
1664                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1665         } else if (tbl8_recycle_index > -1) {
1666                 /* Update tbl24 entry. */
1667                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1668                         { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
1669                         .valid = VALID,
1670                         .valid_group = 0,
1671                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1672                 };
1673
1674                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1675                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1676                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1677         }
1678
1679         return 0;
1680 }
1681
1682 static inline int32_t
1683 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1684         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1685 {
1686 #define group_idx next_hop
1687         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1688                         tbl8_range, i;
1689         int32_t tbl8_recycle_index;
1690
1691         /*
1692          * Calculate the index into tbl24 and range. Note: All depths larger
1693          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1694          */
1695         tbl24_index = ip_masked >> 8;
1696
1697         /* Calculate the index into tbl8 and range. */
1698         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1699         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1700         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1701         tbl8_range = depth_to_range(depth);
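	/*
	 * Illustrative example (values chosen purely for illustration):
	 * deleting a /26 rule gives tbl8_range = depth_to_range(26) = 64, so
	 * only the 64 tbl8 entries starting at tbl8_index are touched; the
	 * owning tbl24 slot is updated below only if the group becomes
	 * recyclable.
	 */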
1702
1703         if (sub_rule_index < 0) {
1704                 /*
1705                  * Loop through the range of entries on tbl8 for which the
1706                  * rule_to_delete must be removed or modified.
1707                  */
1708                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1709                         if (lpm->tbl8[i].depth <= depth)
1710                                 lpm->tbl8[i].valid = INVALID;
1711                 }
1712         } else {
1713                 /* Set new tbl8 entry. */
1714                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1715                         .valid = VALID,
1716                         .depth = sub_rule_depth,
1717                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1718                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1719                 };
1720
1721                 /*
1722                  * Loop through the range of entries on tbl8 for which the
1723                  * rule_to_delete must be modified.
1724                  */
1725                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1726                         if (lpm->tbl8[i].depth <= depth)
1727                                 lpm->tbl8[i] = new_tbl8_entry;
1728                 }
1729         }
1730
1731         /*
1732          * Check if there are any valid entries in this tbl8 group. If all
1733          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1734          * associated tbl24 entry.
1735          */
1736
1737         tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1738
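	/*
	 * Note on ordering: lookups read tbl24 and then tbl8 without taking a
	 * lock, so in both branches below the tbl24 entry is updated (or
	 * invalidated) before the tbl8 group is handed back to the free pool.
	 */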
1739         if (tbl8_recycle_index == -EINVAL) {
1740                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1741                 lpm->tbl24[tbl24_index].valid = 0;
1742                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1743         } else if (tbl8_recycle_index > -1) {
1744                 /* Update tbl24 entry. */
1745                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1746                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1747                         .valid = VALID,
1748                         .valid_group = 0,
1749                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1750                 };
1751
1752                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1753                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1754                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1755         }
1756 #undef group_idx
1757         return 0;
1758 }
1759
1760 /*
1761  * Deletes a rule
1762  */
1763 int
1764 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1765 {
1766         int32_t rule_to_delete_index, sub_rule_index;
1767         uint32_t ip_masked;
1768         uint8_t sub_rule_depth;
1769         /*
1770          * Check input arguments. Note: ip is an unsigned 32-bit integer, so
1771          * any value is valid and does not need to be range checked.
1772          */
1773         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1774                 return -EINVAL;
1775         }
1776
1777         ip_masked = ip & depth_to_mask(depth);
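	/*
	 * Illustrative example (values chosen purely for illustration): for
	 * ip = 10.1.1.7 and depth = 24, depth_to_mask(24) = 0xffffff00, so
	 * ip_masked = 10.1.1.0, the form in which the rule was stored.
	 */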
1778
1779         /*
1780          * Find the index of the input rule, that needs to be deleted, in the
1781          * rule table.
1782          */
1783         rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1784
1785         /*
1786          * Check if rule_to_delete_index was found. If no rule was found the
1787          * function rule_find returns -EINVAL.
1788          */
1789         if (rule_to_delete_index < 0)
1790                 return -EINVAL;
1791
1792         /* Delete the rule from the rule table. */
1793         rule_delete_v20(lpm, rule_to_delete_index, depth);
1794
1795         /*
1796          * Find a rule to replace the rule_to_delete. If no replacement
1797          * exists, find_previous_rule returns a negative value and the table
1798          * entries associated with the deleted rule are simply invalidated.
1799          */
1800         sub_rule_depth = 0;
1801         sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1802
1803         /*
1804          * If the input depth value is less than 25 use function
1805          * delete_depth_small otherwise use delete_depth_big.
1806          */
1807         if (depth <= MAX_DEPTH_TBL24) {
1808                 return delete_depth_small_v20(lpm, ip_masked, depth,
1809                                 sub_rule_index, sub_rule_depth);
1810         } else { /* If depth > MAX_DEPTH_TBL24 */
1811                 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1812                                 sub_rule_depth);
1813         }
1814 }
1815 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
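/*
 * The macros above and below implement DPDK symbol versioning: the _v20
 * implementation stays reachable for binaries built against the DPDK 2.0
 * ABI, while BIND_DEFAULT_SYMBOL and MAP_STATIC_SYMBOL make the _v1604
 * variant the default rte_lpm_delete for shared and static builds.
 */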
1816
1817 int
1818 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1819 {
1820         int32_t rule_to_delete_index, sub_rule_index;
1821         uint32_t ip_masked;
1822         uint8_t sub_rule_depth;
1823         /*
1824          * Check input arguments. Note: ip is an unsigned 32-bit integer, so
1825          * any value is valid and does not need to be range checked.
1826          */
1827         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1828                 return -EINVAL;
1829         }
1830
1831         ip_masked = ip & depth_to_mask(depth);
1832
1833         /*
1834          * Find the index of the input rule, that needs to be deleted, in the
1835          * rule table.
1836          */
1837         rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1838
1839         /*
1840          * Check if rule_to_delete_index was found. If no rule was found the
1841          * function rule_find returns -EINVAL.
1842          */
1843         if (rule_to_delete_index < 0)
1844                 return -EINVAL;
1845
1846         /* Delete the rule from the rule table. */
1847         rule_delete_v1604(lpm, rule_to_delete_index, depth);
1848
1849         /*
1850          * Find a rule to replace the rule_to_delete. If no replacement
1851          * exists, find_previous_rule returns a negative value and the table
1852          * entries associated with the deleted rule are simply invalidated.
1853          */
1854         sub_rule_depth = 0;
1855         sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1856
1857         /*
1858          * If the input depth value is less than 25 use function
1859          * delete_depth_small otherwise use delete_depth_big.
1860          */
1861         if (depth <= MAX_DEPTH_TBL24) {
1862                 return delete_depth_small_v1604(lpm, ip_masked, depth,
1863                                 sub_rule_index, sub_rule_depth);
1864         } else { /* If depth > MAX_DEPTH_TBL24 */
1865                 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1866                                 sub_rule_depth);
1867         }
1868 }
1869 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1870 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1871                 uint8_t depth), rte_lpm_delete_v1604);
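
/*
 * Usage sketch (illustrative only; "lpm" is assumed to come from an earlier
 * rte_lpm_create() call and to have been populated with rte_lpm_add()):
 *
 *	uint32_t ip = (10 << 24) | (1 << 16) | (1 << 8);	<- 10.1.1.0
 *	int ret = rte_lpm_delete(lpm, ip, 24);
 *
 * A negative return (-EINVAL) means bad arguments or that no rule for
 * 10.1.1.0/24 exists; a return of 0 means the rule and its table entries
 * were removed.
 */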
1872
1873 /*
1874  * Delete all rules from the LPM table.
1875  */
1876 void
1877 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1878 {
1879         /* Zero rule information. */
1880         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1881
1882         /* Zero tbl24. */
1883         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1884
1885         /* Zero tbl8. */
1886         memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1887
1888         /* Delete all rules from the rules table. */
1889         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1890 }
1891 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1892
1893 void
1894 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1895 {
1896         /* Zero rule information. */
1897         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1898
1899         /* Zero tbl24. */
1900         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1901
1902         /* Zero tbl8. */
1903         memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1904                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
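	/*
	 * Unlike the v20 layout, where tbl8 is a fixed-size array embedded in
	 * the LPM structure, the tbl8 array here is allocated separately with
	 * a run-time number_tbl8s, so its size has to be computed explicitly.
	 */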
1905
1906         /* Delete all rules from the rules table. */
1907         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1908 }
1909 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1910 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1911                 rte_lpm_delete_all_v1604);