lpm: extend IPv4 next hop field
[dpdk.git] lib/librte_lpm/rte_lpm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <string.h>
35 #include <stdint.h>
36 #include <errno.h>
37 #include <stdarg.h>
38 #include <stdio.h>
40 #include <sys/queue.h>
41
42 #include <rte_log.h>
43 #include <rte_branch_prediction.h>
44 #include <rte_common.h>
45 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
46 #include <rte_malloc.h>
47 #include <rte_memzone.h>
48 #include <rte_eal.h>
49 #include <rte_eal_memconfig.h>
50 #include <rte_per_lcore.h>
51 #include <rte_string_fns.h>
52 #include <rte_errno.h>
53 #include <rte_rwlock.h>
54 #include <rte_spinlock.h>
55
56 #include "rte_lpm.h"
57
58 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
59
60 static struct rte_tailq_elem rte_lpm_tailq = {
61         .name = "RTE_LPM",
62 };
63 EAL_REGISTER_TAILQ(rte_lpm_tailq)
64
65 #define MAX_DEPTH_TBL24 24
66
67 enum valid_flag {
68         INVALID = 0,
69         VALID
70 };
71
72 /* Macro to enable/disable run-time checks. */
73 #if defined(RTE_LIBRTE_LPM_DEBUG)
74 #include <rte_debug.h>
75 #define VERIFY_DEPTH(depth) do {                                \
76         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
77                 rte_panic("LPM: Invalid depth (%u) at line %d", \
78                                 (unsigned)(depth), __LINE__);   \
79 } while (0)
80 #else
81 #define VERIFY_DEPTH(depth)
82 #endif
83
84 /*
85  * Converts a given depth value to its corresponding mask value.
86  *
87  * depth  (IN)          : range = 1 - 32
88  * mask   (OUT)         : 32bit mask
89  */
90 static uint32_t __attribute__((pure))
91 depth_to_mask(uint8_t depth)
92 {
93         VERIFY_DEPTH(depth);
94
95         /* To calculate the mask, start with a 1 in the most significant bit and
96          * arithmetic-right-shift it (hence the cast to int) so that 1's fill in from the left
97          */
98         return (int)0x80000000 >> (depth - 1);
99 }
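/*
 * Illustrative sanity check (not part of the library): expected mask values
 * for a few representative depths, assuming the arithmetic right shift
 * described in the comment above. The helper name is hypothetical.
 *
 *	#include <assert.h>
 *
 *	static void
 *	example_depth_to_mask_values(void)
 *	{
 *		assert(depth_to_mask(1)  == 0x80000000);
 *		assert(depth_to_mask(8)  == 0xFF000000);
 *		assert(depth_to_mask(24) == 0xFFFFFF00);
 *		assert(depth_to_mask(32) == 0xFFFFFFFF);
 *	}
 */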
100
101 /*
102  * Converts given depth value to its corresponding range value.
103  */
104 static inline uint32_t __attribute__((pure))
105 depth_to_range(uint8_t depth)
106 {
107         VERIFY_DEPTH(depth);
108
109         /*
110          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
111          */
112         if (depth <= MAX_DEPTH_TBL24)
113                 return 1 << (MAX_DEPTH_TBL24 - depth);
114
115         /* Else if depth is greater than 24 */
116         return 1 << (RTE_LPM_MAX_DEPTH - depth);
117 }
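/*
 * Illustrative values (not part of the library): how many table entries a
 * prefix of a given depth spans.
 *
 *	depth_to_range(16) == 256    number of tbl24 entries covered by a /16
 *	depth_to_range(24) == 1      exactly one tbl24 entry
 *	depth_to_range(25) == 128    tbl8 entries covered within one group
 *	depth_to_range(32) == 1      a single tbl8 entry (host route)
 */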
118
119 /*
120  * Find an existing lpm table and return a pointer to it.
121  */
122 struct rte_lpm_v20 *
123 rte_lpm_find_existing_v20(const char *name)
124 {
125         struct rte_lpm_v20 *l = NULL;
126         struct rte_tailq_entry *te;
127         struct rte_lpm_list *lpm_list;
128
129         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
130
131         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
132         TAILQ_FOREACH(te, lpm_list, next) {
133                 l = (struct rte_lpm_v20 *) te->data;
134                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
135                         break;
136         }
137         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
138
139         if (te == NULL) {
140                 rte_errno = ENOENT;
141                 return NULL;
142         }
143
144         return l;
145 }
146 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
147
148 struct rte_lpm *
149 rte_lpm_find_existing_v1604(const char *name)
150 {
151         struct rte_lpm *l = NULL;
152         struct rte_tailq_entry *te;
153         struct rte_lpm_list *lpm_list;
154
155         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
156
157         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
158         TAILQ_FOREACH(te, lpm_list, next) {
159                 l = (struct rte_lpm *) te->data;
160                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
161                         break;
162         }
163         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
164
165         if (te == NULL) {
166                 rte_errno = ENOENT;
167                 return NULL;
168         }
169
170         return l;
171 }
172 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
173 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
174                 rte_lpm_find_existing_v1604);
175
176 /*
177  * Allocates memory for LPM object
178  */
179 struct rte_lpm_v20 *
180 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
181                 __rte_unused int flags)
182 {
183         char mem_name[RTE_LPM_NAMESIZE];
184         struct rte_lpm_v20 *lpm = NULL;
185         struct rte_tailq_entry *te;
186         uint32_t mem_size;
187         struct rte_lpm_list *lpm_list;
188
189         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
190
191         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
192
193         /* Check user arguments. */
194         if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
195                 rte_errno = EINVAL;
196                 return NULL;
197         }
198
199         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
200
201         /* Determine the amount of memory to allocate. */
202         mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
203
204         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
205
206         /* Ensure no LPM table with this name already exists. */
207         TAILQ_FOREACH(te, lpm_list, next) {
208                 lpm = (struct rte_lpm_v20 *) te->data;
209                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
210                         break;
211         }
212         if (te != NULL)
213                 goto exit;
214
215         /* allocate tailq entry */
216         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
217         if (te == NULL) {
218                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
219                 goto exit;
220         }
221
222         /* Allocate memory to store the LPM data structures. */
223         lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
224                         RTE_CACHE_LINE_SIZE, socket_id);
225         if (lpm == NULL) {
226                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
227                 rte_free(te);
228                 goto exit;
229         }
230
231         /* Save user arguments. */
232         lpm->max_rules = max_rules;
233         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
234
235         te->data = (void *) lpm;
236
237         TAILQ_INSERT_TAIL(lpm_list, te, next);
238
239 exit:
240         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
241
242         return lpm;
243 }
244 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
245
246 struct rte_lpm *
247 rte_lpm_create_v1604(const char *name, int socket_id, int max_rules,
248                 __rte_unused int flags)
249 {
250         char mem_name[RTE_LPM_NAMESIZE];
251         struct rte_lpm *lpm = NULL;
252         struct rte_tailq_entry *te;
253         uint32_t mem_size;
254         struct rte_lpm_list *lpm_list;
255
256         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
257
258         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
259
260         /* Check user arguments. */
261         if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
262                 rte_errno = EINVAL;
263                 return NULL;
264         }
265
266         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
267
268         /* Determine the amount of memory to allocate. */
269         mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
270
271         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
272
273         /* Ensure no LPM table with this name already exists. */
274         TAILQ_FOREACH(te, lpm_list, next) {
275                 lpm = (struct rte_lpm *) te->data;
276                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
277                         break;
278         }
279         if (te != NULL)
280                 goto exit;
281
282         /* allocate tailq entry */
283         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
284         if (te == NULL) {
285                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
286                 goto exit;
287         }
288
289         /* Allocate memory to store the LPM data structures. */
290         lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
291                         RTE_CACHE_LINE_SIZE, socket_id);
292         if (lpm == NULL) {
293                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
294                 rte_free(te);
295                 goto exit;
296         }
297
298         /* Save user arguments. */
299         lpm->max_rules = max_rules;
300         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
301
302         te->data = (void *) lpm;
303
304         TAILQ_INSERT_TAIL(lpm_list, te, next);
305
306 exit:
307         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
308
309         return lpm;
310 }
311 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
312 MAP_STATIC_SYMBOL(
313         struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
314                         int max_rules, int flags), rte_lpm_create_v1604);
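/*
 * Illustrative usage sketch (not part of the library): creating a table,
 * finding it again by name and releasing it. The table name and rule count
 * are arbitrary examples and error handling is abbreviated.
 *
 *	#include <rte_lpm.h>
 *	#include <rte_memory.h>       (for SOCKET_ID_ANY)
 *
 *	static struct rte_lpm *
 *	example_create_lpm(void)
 *	{
 *		struct rte_lpm *lpm;
 *
 *		lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, 1024, 0);
 *		if (lpm == NULL)
 *			return NULL;
 *
 *		if (rte_lpm_find_existing("example_lpm") != lpm) {
 *			rte_lpm_free(lpm);
 *			return NULL;
 *		}
 *		return lpm;
 *	}
 */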
315
316 /*
317  * Deallocates memory for given LPM table.
318  */
319 void
320 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
321 {
322         struct rte_lpm_list *lpm_list;
323         struct rte_tailq_entry *te;
324
325         /* Check user arguments. */
326         if (lpm == NULL)
327                 return;
328
329         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
330
331         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
332
333         /* find our tailq entry */
334         TAILQ_FOREACH(te, lpm_list, next) {
335                 if (te->data == (void *) lpm)
336                         break;
337         }
338         if (te == NULL) {
339                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
340                 return;
341         }
342
343         TAILQ_REMOVE(lpm_list, te, next);
344
345         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
346
347         rte_free(lpm);
348         rte_free(te);
349 }
350 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
351
352 void
353 rte_lpm_free_v1604(struct rte_lpm *lpm)
354 {
355         struct rte_lpm_list *lpm_list;
356         struct rte_tailq_entry *te;
357
358         /* Check user arguments. */
359         if (lpm == NULL)
360                 return;
361
362         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
363
364         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
365
366         /* find our tailq entry */
367         TAILQ_FOREACH(te, lpm_list, next) {
368                 if (te->data == (void *) lpm)
369                         break;
370         }
371         if (te == NULL) {
372                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
373                 return;
374         }
375
376         TAILQ_REMOVE(lpm_list, te, next);
377
378         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
379
380         rte_free(lpm);
381         rte_free(te);
382 }
383 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
384 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
385                 rte_lpm_free_v1604);
386
387 /*
388  * Adds a rule to the rule table.
389  *
390  * NOTE: The rule table is split into 32 groups. Each group contains rules that
391  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
392  * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
393  * to index the group for a given depth: the depth range is 1 - 32, but the
394  * groups are stored in the rule table at indices 0 - 31.
395  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
396  */
397 static inline int32_t
398 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
399         uint8_t next_hop)
400 {
401         uint32_t rule_gindex, rule_index, last_rule;
402         int i;
403
404         VERIFY_DEPTH(depth);
405
406         /* Scan through rule group to see if rule already exists. */
407         if (lpm->rule_info[depth - 1].used_rules > 0) {
408
409                 /* rule_gindex stands for rule group index. */
410                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
411                 /* Initialise rule_index to point to start of rule group. */
412                 rule_index = rule_gindex;
413                 /* Last rule = Last used rule in this rule group. */
414                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
415
416                 for (; rule_index < last_rule; rule_index++) {
417
418                         /* If rule already exists update its next_hop and return. */
419                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
420                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
421
422                                 return rule_index;
423                         }
424                 }
425
426                 if (rule_index == lpm->max_rules)
427                         return -ENOSPC;
428         } else {
429                 /* Calculate the position in which the rule will be stored. */
430                 rule_index = 0;
431
432                 for (i = depth - 1; i > 0; i--) {
433                         if (lpm->rule_info[i - 1].used_rules > 0) {
434                                 rule_index = lpm->rule_info[i - 1].first_rule
435                                                 + lpm->rule_info[i - 1].used_rules;
436                                 break;
437                         }
438                 }
439                 if (rule_index == lpm->max_rules)
440                         return -ENOSPC;
441
442                 lpm->rule_info[depth - 1].first_rule = rule_index;
443         }
444
445         /* Make room for the new rule in the array. */
446         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
447                 if (lpm->rule_info[i - 1].first_rule
448                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
449                         return -ENOSPC;
450
451                 if (lpm->rule_info[i - 1].used_rules > 0) {
452                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
453                                 + lpm->rule_info[i - 1].used_rules]
454                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
455                         lpm->rule_info[i - 1].first_rule++;
456                 }
457         }
458
459         /* Add the new rule. */
460         lpm->rules_tbl[rule_index].ip = ip_masked;
461         lpm->rules_tbl[rule_index].next_hop = next_hop;
462
463         /* Increment the used rules counter for this rule group. */
464         lpm->rule_info[depth - 1].used_rules++;
465
466         return rule_index;
467 }
468
469 static inline int32_t
470 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
471         uint32_t next_hop)
472 {
473         uint32_t rule_gindex, rule_index, last_rule;
474         int i;
475
476         VERIFY_DEPTH(depth);
477
478         /* Scan through rule group to see if rule already exists. */
479         if (lpm->rule_info[depth - 1].used_rules > 0) {
480
481                 /* rule_gindex stands for rule group index. */
482                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
483                 /* Initialise rule_index to point to start of rule group. */
484                 rule_index = rule_gindex;
485                 /* Last rule = Last used rule in this rule group. */
486                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
487
488                 for (; rule_index < last_rule; rule_index++) {
489
490                         /* If rule already exists update its next_hop and return. */
491                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
492                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
493
494                                 return rule_index;
495                         }
496                 }
497
498                 if (rule_index == lpm->max_rules)
499                         return -ENOSPC;
500         } else {
501                 /* Calculate the position in which the rule will be stored. */
502                 rule_index = 0;
503
504                 for (i = depth - 1; i > 0; i--) {
505                         if (lpm->rule_info[i - 1].used_rules > 0) {
506                                 rule_index = lpm->rule_info[i - 1].first_rule
507                                                 + lpm->rule_info[i - 1].used_rules;
508                                 break;
509                         }
510                 }
511                 if (rule_index == lpm->max_rules)
512                         return -ENOSPC;
513
514                 lpm->rule_info[depth - 1].first_rule = rule_index;
515         }
516
517         /* Make room for the new rule in the array. */
518         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
519                 if (lpm->rule_info[i - 1].first_rule
520                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
521                         return -ENOSPC;
522
523                 if (lpm->rule_info[i - 1].used_rules > 0) {
524                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
525                                 + lpm->rule_info[i - 1].used_rules]
526                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
527                         lpm->rule_info[i - 1].first_rule++;
528                 }
529         }
530
531         /* Add the new rule. */
532         lpm->rules_tbl[rule_index].ip = ip_masked;
533         lpm->rules_tbl[rule_index].next_hop = next_hop;
534
535         /* Increment the used rules counter for this rule group. */
536         lpm->rule_info[depth - 1].used_rules++;
537
538         return rule_index;
539 }
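/*
 * Illustrative layout of rules_tbl as maintained by rule_add()/rule_delete()
 * (the concrete values below are made up). Rules are stored contiguously,
 * grouped and ordered by depth, so inserting into a middle group only shifts
 * one boundary rule per deeper group rather than the whole tail:
 *
 *	index :   0     1     2     3     4
 *	depth :   8     8     16    24    24
 *
 *	rule_info[7]  = { .first_rule = 0, .used_rules = 2 }    depth  8 group
 *	rule_info[15] = { .first_rule = 2, .used_rules = 1 }    depth 16 group
 *	rule_info[23] = { .first_rule = 3, .used_rules = 2 }    depth 24 group
 */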
540
541 /*
542  * Delete a rule from the rule table.
543  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
544  */
545 static inline void
546 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
547 {
548         int i;
549
550         VERIFY_DEPTH(depth);
551
552         lpm->rules_tbl[rule_index] =
553                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
554                                 + lpm->rule_info[depth - 1].used_rules - 1];
555
556         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
557                 if (lpm->rule_info[i].used_rules > 0) {
558                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
559                                 lpm->rules_tbl[lpm->rule_info[i].first_rule
560                                         + lpm->rule_info[i].used_rules - 1];
561                         lpm->rule_info[i].first_rule--;
562                 }
563         }
564
565         lpm->rule_info[depth - 1].used_rules--;
566 }
567
568 static inline void
569 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
570 {
571         int i;
572
573         VERIFY_DEPTH(depth);
574
575         lpm->rules_tbl[rule_index] =
576                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
577                         + lpm->rule_info[depth - 1].used_rules - 1];
578
579         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
580                 if (lpm->rule_info[i].used_rules > 0) {
581                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
582                                         lpm->rules_tbl[lpm->rule_info[i].first_rule
583                                                 + lpm->rule_info[i].used_rules - 1];
584                         lpm->rule_info[i].first_rule--;
585                 }
586         }
587
588         lpm->rule_info[depth - 1].used_rules--;
589 }
590
591 /*
592  * Finds a rule in rule table.
593  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
594  */
595 static inline int32_t
596 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
597 {
598         uint32_t rule_gindex, last_rule, rule_index;
599
600         VERIFY_DEPTH(depth);
601
602         rule_gindex = lpm->rule_info[depth - 1].first_rule;
603         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
604
605         /* Scan used rules at given depth to find rule. */
606         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
607                 /* If rule is found return the rule index. */
608                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
609                         return rule_index;
610         }
611
612         /* If rule is not found return -EINVAL. */
613         return -EINVAL;
614 }
615
616 static inline int32_t
617 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
618 {
619         uint32_t rule_gindex, last_rule, rule_index;
620
621         VERIFY_DEPTH(depth);
622
623         rule_gindex = lpm->rule_info[depth - 1].first_rule;
624         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
625
626         /* Scan used rules at given depth to find rule. */
627         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
628                 /* If rule is found return the rule index. */
629                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
630                         return rule_index;
631         }
632
633         /* If rule is not found return -EINVAL. */
634         return -EINVAL;
635 }
636
637 /*
638  * Find, clean and allocate a tbl8.
639  */
640 static inline int32_t
641 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
642 {
643         uint32_t group_idx; /* tbl8 group index. */
644         struct rte_lpm_tbl_entry_v20 *tbl8_entry;
645
646         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
647         for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
648                         group_idx++) {
649                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
650                 /* If a free tbl8 group is found clean it and set as VALID. */
651                 if (!tbl8_entry->valid_group) {
652                         memset(&tbl8_entry[0], 0,
653                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
654                                         sizeof(tbl8_entry[0]));
655
656                         tbl8_entry->valid_group = VALID;
657
658                         /* Return group index for allocated tbl8 group. */
659                         return group_idx;
660                 }
661         }
662
663         /* If there are no tbl8 groups free then return error. */
664         return -ENOSPC;
665 }
666
667 static inline int32_t
668 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8)
669 {
670         uint32_t group_idx; /* tbl8 group index. */
671         struct rte_lpm_tbl_entry *tbl8_entry;
672
673         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
674         for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
675                         group_idx++) {
676                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
677                 /* If a free tbl8 group is found clean it and set as VALID. */
678                 if (!tbl8_entry->valid_group) {
679                         memset(&tbl8_entry[0], 0,
680                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
681                                         sizeof(tbl8_entry[0]));
682
683                         tbl8_entry->valid_group = VALID;
684
685                         /* Return group index for allocated tbl8 group. */
686                         return group_idx;
687                 }
688         }
689
690         /* If there are no tbl8 groups free then return error. */
691         return -ENOSPC;
692 }
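/*
 * Illustrative sketch (not part of the library): converting a tbl8 group
 * index returned by tbl8_alloc into an absolute tbl8 entry index, mirroring
 * the arithmetic used by the add/delete paths below. The values are
 * arbitrary examples (RTE_LPM_TBL8_GROUP_NUM_ENTRIES is 256).
 *
 *	uint32_t group_idx  = 3;
 *	uint32_t group_base = group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 *	uint32_t tbl8_index = group_base + (ip_masked & 0xFF);
 */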
693
694 static inline void
695 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
696 {
697         /* Set tbl8 group invalid. */
698         tbl8[tbl8_group_start].valid_group = INVALID;
699 }
700
701 static inline void
702 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
703 {
704         /* Set tbl8 group invalid. */
705         tbl8[tbl8_group_start].valid_group = INVALID;
706 }
707
708 static inline int32_t
709 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
710                 uint8_t next_hop)
711 {
712         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
713
714         /* Calculate the index into Table24. */
715         tbl24_index = ip >> 8;
716         tbl24_range = depth_to_range(depth);
717
718         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
719                 /*
720                  * Set the tbl24 entry if it is invalid, or if it is valid,
721                  * non-extended and no deeper than the new prefix.
722                  */
723                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
724                                 lpm->tbl24[i].depth <= depth)) {
725
726                         struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
727                                 { .next_hop = next_hop, },
728                                 .valid = VALID,
729                                 .valid_group = 0,
730                                 .depth = depth,
731                         };
732
733                         /* Setting tbl24 entry in one go to avoid race
734                          * conditions
735                          */
736                         lpm->tbl24[i] = new_tbl24_entry;
737
738                         continue;
739                 }
740
741                 if (lpm->tbl24[i].valid_group == 1) {
742                         /* If tbl24 entry is valid and extended calculate the
743                          *  index into tbl8.
744                          */
745                         tbl8_index = lpm->tbl24[i].group_idx *
746                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
747                         tbl8_group_end = tbl8_index +
748                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
749
750                         for (j = tbl8_index; j < tbl8_group_end; j++) {
751                                 if (!lpm->tbl8[j].valid ||
752                                                 lpm->tbl8[j].depth <= depth) {
753                                         struct rte_lpm_tbl_entry_v20
754                                                 new_tbl8_entry = {
755                                                 .valid = VALID,
756                                                 .valid_group = VALID,
757                                                 .depth = depth,
758                                                 .next_hop = next_hop,
759                                         };
760
761                                         /*
762                                          * Setting tbl8 entry in one go to avoid
763                                          * race conditions
764                                          */
765                                         lpm->tbl8[j] = new_tbl8_entry;
766
767                                         continue;
768                                 }
769                         }
770                 }
771         }
772
773         return 0;
774 }
775
776 static inline int32_t
777 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
778                 uint32_t next_hop)
779 {
780 #define group_idx next_hop
781         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
782
783         /* Calculate the index into Table24. */
784         tbl24_index = ip >> 8;
785         tbl24_range = depth_to_range(depth);
786
787         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
788                 /*
789                  * Set the tbl24 entry if it is invalid, or if it is valid,
790                  * non-extended and no deeper than the new prefix.
791                  */
792                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
793                                 lpm->tbl24[i].depth <= depth)) {
794
795                         struct rte_lpm_tbl_entry new_tbl24_entry = {
796                                 .next_hop = next_hop,
797                                 .valid = VALID,
798                                 .valid_group = 0,
799                                 .depth = depth,
800                         };
801
802                         /* Setting tbl24 entry in one go to avoid race
803                          * conditions
804                          */
805                         lpm->tbl24[i] = new_tbl24_entry;
806
807                         continue;
808                 }
809
810                 if (lpm->tbl24[i].valid_group == 1) {
811                         /* If tbl24 entry is valid and extended calculate the
812                          *  index into tbl8.
813                          */
814                         tbl8_index = lpm->tbl24[i].group_idx *
815                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
816                         tbl8_group_end = tbl8_index +
817                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
818
819                         for (j = tbl8_index; j < tbl8_group_end; j++) {
820                                 if (!lpm->tbl8[j].valid ||
821                                                 lpm->tbl8[j].depth <= depth) {
822                                         struct rte_lpm_tbl_entry
823                                                 new_tbl8_entry = {
824                                                 .valid = VALID,
825                                                 .valid_group = VALID,
826                                                 .depth = depth,
827                                                 .next_hop = next_hop,
828                                         };
829
830                                         /*
831                                          * Setting tbl8 entry in one go to avoid
832                                          * race conditions
833                                          */
834                                         lpm->tbl8[j] = new_tbl8_entry;
835
836                                         continue;
837                                 }
838                         }
839                 }
840         }
841 #undef group_idx
842         return 0;
843 }
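/*
 * Worked example for the small-depth path above (illustrative, arbitrary
 * prefix): adding 10.1.0.0/16 with some next hop N.
 *
 *	ip_masked   = 0x0A010000                   (10.1.0.0)
 *	tbl24_index = 0x0A010000 >> 8 = 0x0A0100   (655616)
 *	tbl24_range = 1 << (24 - 16)  = 256
 *
 * So tbl24 entries 655616 .. 655871 are candidates: invalid entries and
 * non-extended entries no deeper than /16 are overwritten in one assignment,
 * while for extended entries every tbl8 entry in their group that is invalid
 * or no deeper than /16 is updated instead.
 */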
844
845 static inline int32_t
846 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
847                 uint8_t next_hop)
848 {
849         uint32_t tbl24_index;
850         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
851                 tbl8_range, i;
852
853         tbl24_index = (ip_masked >> 8);
854         tbl8_range = depth_to_range(depth);
855
856         if (!lpm->tbl24[tbl24_index].valid) {
857                 /* Search for a free tbl8 group. */
858                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
859
860                 /* Check tbl8 allocation was successful. */
861                 if (tbl8_group_index < 0) {
862                         return tbl8_group_index;
863                 }
864
865                 /* Find index into tbl8 and range. */
866                 tbl8_index = (tbl8_group_index *
867                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
868                                 (ip_masked & 0xFF);
869
870                 /* Set tbl8 entry. */
871                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
872                         lpm->tbl8[i].depth = depth;
873                         lpm->tbl8[i].next_hop = next_hop;
874                         lpm->tbl8[i].valid = VALID;
875                 }
876
877                 /*
878                  * Update tbl24 entry to point to new tbl8 entry. Note: The
879                  * ext_flag and tbl8_index need to be updated simultaneously,
880                  * so assign whole structure in one go
881                  */
882
883                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
884                         { .group_idx = (uint8_t)tbl8_group_index, },
885                         .valid = VALID,
886                         .valid_group = 1,
887                         .depth = 0,
888                 };
889
890                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
891
892         } /* If the entry is valid but not extended, calculate the index into tbl8. */
893         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
894                 /* Search for free tbl8 group. */
895                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
896
897                 if (tbl8_group_index < 0) {
898                         return tbl8_group_index;
899                 }
900
901                 tbl8_group_start = tbl8_group_index *
902                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
903                 tbl8_group_end = tbl8_group_start +
904                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
905
906                 /* Populate new tbl8 with tbl24 value. */
907                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
908                         lpm->tbl8[i].valid = VALID;
909                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
910                         lpm->tbl8[i].next_hop =
911                                         lpm->tbl24[tbl24_index].next_hop;
912                 }
913
914                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
915
916                 /* Insert new rule into the tbl8 entry. */
917                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
918                         if (!lpm->tbl8[i].valid ||
919                                         lpm->tbl8[i].depth <= depth) {
920                                 lpm->tbl8[i].valid = VALID;
921                                 lpm->tbl8[i].depth = depth;
922                                 lpm->tbl8[i].next_hop = next_hop;
923
924                                 continue;
925                         }
926                 }
927
928                 /*
929                  * Update tbl24 entry to point to new tbl8 entry. Note: The
930                  * ext_flag and tbl8_index need to be updated simultaneously,
931                  * so assign whole structure in one go.
932                  */
933
934                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
935                                 { .group_idx = (uint8_t)tbl8_group_index, },
936                                 .valid = VALID,
937                                 .valid_group = 1,
938                                 .depth = 0,
939                 };
940
941                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
942
943         } else { /*
944                 * The entry is valid and extended: calculate the index into tbl8.
945                 */
946                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
947                 tbl8_group_start = tbl8_group_index *
948                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
949                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
950
951                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
952
953                         if (!lpm->tbl8[i].valid ||
954                                         lpm->tbl8[i].depth <= depth) {
955                                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
956                                         .valid = VALID,
957                                         .depth = depth,
958                                         .next_hop = next_hop,
959                                         .valid_group = lpm->tbl8[i].valid_group,
960                                 };
961
962                                 /*
963                                  * Setting tbl8 entry in one go to avoid race
964                                  * condition
965                                  */
966                                 lpm->tbl8[i] = new_tbl8_entry;
967
968                                 continue;
969                         }
970                 }
971         }
972
973         return 0;
974 }
975
976 static inline int32_t
977 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
978                 uint32_t next_hop)
979 {
980 #define group_idx next_hop
981         uint32_t tbl24_index;
982         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
983                 tbl8_range, i;
984
985         tbl24_index = (ip_masked >> 8);
986         tbl8_range = depth_to_range(depth);
987
988         if (!lpm->tbl24[tbl24_index].valid) {
989                 /* Search for a free tbl8 group. */
990                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
991
992                 /* Check tbl8 allocation was successful. */
993                 if (tbl8_group_index < 0) {
994                         return tbl8_group_index;
995                 }
996
997                 /* Find index into tbl8 and range. */
998                 tbl8_index = (tbl8_group_index *
999                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1000                                 (ip_masked & 0xFF);
1001
1002                 /* Set tbl8 entry. */
1003                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1004                         lpm->tbl8[i].depth = depth;
1005                         lpm->tbl8[i].next_hop = next_hop;
1006                         lpm->tbl8[i].valid = VALID;
1007                 }
1008
1009                 /*
1010                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1011                  * ext_flag and tbl8_index need to be updated simultaneously,
1012                  * so assign whole structure in one go
1013                  */
1014
1015                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1016                         .group_idx = (uint8_t)tbl8_group_index,
1017                         .valid = VALID,
1018                         .valid_group = 1,
1019                         .depth = 0,
1020                 };
1021
1022                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1023
1024         } /* If the entry is valid but not extended, calculate the index into tbl8. */
1025         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1026                 /* Search for free tbl8 group. */
1027                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
1028
1029                 if (tbl8_group_index < 0) {
1030                         return tbl8_group_index;
1031                 }
1032
1033                 tbl8_group_start = tbl8_group_index *
1034                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1035                 tbl8_group_end = tbl8_group_start +
1036                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1037
1038                 /* Populate new tbl8 with tbl24 value. */
1039                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1040                         lpm->tbl8[i].valid = VALID;
1041                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
1042                         lpm->tbl8[i].next_hop =
1043                                         lpm->tbl24[tbl24_index].next_hop;
1044                 }
1045
1046                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1047
1048                 /* Insert new rule into the tbl8 entry. */
1049                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1050                         if (!lpm->tbl8[i].valid ||
1051                                         lpm->tbl8[i].depth <= depth) {
1052                                 lpm->tbl8[i].valid = VALID;
1053                                 lpm->tbl8[i].depth = depth;
1054                                 lpm->tbl8[i].next_hop = next_hop;
1055
1056                                 continue;
1057                         }
1058                 }
1059
1060                 /*
1061                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1062                  * ext_flag and tbl8_index need to be updated simultaneously,
1063                  * so assign whole structure in one go.
1064                  */
1065
1066                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1067                                 .group_idx = (uint8_t)tbl8_group_index,
1068                                 .valid = VALID,
1069                                 .valid_group = 1,
1070                                 .depth = 0,
1071                 };
1072
1073                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1074
1075         } else { /*
1076                 * The entry is valid and extended: calculate the index into tbl8.
1077                 */
1078                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1079                 tbl8_group_start = tbl8_group_index *
1080                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1081                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1082
1083                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1084
1085                         if (!lpm->tbl8[i].valid ||
1086                                         lpm->tbl8[i].depth <= depth) {
1087                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1088                                         .valid = VALID,
1089                                         .depth = depth,
1090                                         .next_hop = next_hop,
1091                                         .valid_group = lpm->tbl8[i].valid_group,
1092                                 };
1093
1094                                 /*
1095                                  * Setting tbl8 entry in one go to avoid race
1096                                  * condition
1097                                  */
1098                                 lpm->tbl8[i] = new_tbl8_entry;
1099
1100                                 continue;
1101                         }
1102                 }
1103         }
1104 #undef group_idx
1105         return 0;
1106 }
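/*
 * Worked example for the large-depth path above (illustrative, arbitrary
 * prefix): adding 10.1.1.128/25 with some next hop N while tbl24[0x0A0101]
 * is not yet extended.
 *
 *	tbl24_index = 0x0A010180 >> 8 = 0x0A0101
 *	tbl8_range  = 1 << (32 - 25)  = 128
 *	offset      = 0x0A010180 & 0xFF = 128
 *
 * A free tbl8 group is allocated; if the old tbl24 entry was valid, all 256
 * group entries first inherit its value; entries 128 .. 255 of the group are
 * then written with depth 25 and next hop N, and tbl24[0x0A0101] is switched
 * to an extended entry pointing at the group in a single assignment.
 */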
1107
1108 /*
1109  * Add a route
1110  */
1111 int
1112 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1113                 uint8_t next_hop)
1114 {
1115         int32_t rule_index, status = 0;
1116         uint32_t ip_masked;
1117
1118         /* Check user arguments. */
1119         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1120                 return -EINVAL;
1121
1122         ip_masked = ip & depth_to_mask(depth);
1123
1124         /* Add the rule to the rule table. */
1125         rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
1126
1127         /* If there is no space available for the new rule, return an error. */
1128         if (rule_index < 0) {
1129                 return rule_index;
1130         }
1131
1132         if (depth <= MAX_DEPTH_TBL24) {
1133                 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
1134         } else { /* If depth > MAX_DEPTH_TBL24 */
1135                 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
1136
1137                 /*
1138                  * If the add fails because the tbl8 extensions are exhausted,
1139                  * delete the rule that was added to the rule table.
1140                  */
1141                 if (status < 0) {
1142                         rule_delete_v20(lpm, rule_index, depth);
1143
1144                         return status;
1145                 }
1146         }
1147
1148         return 0;
1149 }
1150 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1151
1152 int
1153 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1154                 uint32_t next_hop)
1155 {
1156         int32_t rule_index, status = 0;
1157         uint32_t ip_masked;
1158
1159         /* Check user arguments. */
1160         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1161                 return -EINVAL;
1162
1163         ip_masked = ip & depth_to_mask(depth);
1164
1165         /* Add the rule to the rule table. */
1166         rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
1167
1168         /* If there is no space available for the new rule, return an error. */
1169         if (rule_index < 0) {
1170                 return rule_index;
1171         }
1172
1173         if (depth <= MAX_DEPTH_TBL24) {
1174                 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
1175         } else { /* If depth > MAX_DEPTH_TBL24 */
1176                 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
1177
1178                 /*
1179                  * If the add fails because the tbl8 extensions are exhausted,
1180                  * delete the rule that was added to the rule table.
1181                  */
1182                 if (status < 0) {
1183                         rule_delete_v1604(lpm, rule_index, depth);
1184
1185                         return status;
1186                 }
1187         }
1188
1189         return 0;
1190 }
1191 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1192 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1193                 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
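/*
 * Illustrative usage sketch (not part of the library): populating a table
 * and resolving a destination with rte_lpm_lookup(), which is declared in
 * rte_lpm.h and returns 0 on a hit. With the two routes below, looking up
 * 10.1.2.3 matches the longer /16 prefix and yields next hop 2. Addresses
 * and next hop IDs are arbitrary examples.
 *
 *	#include <rte_lpm.h>
 *
 *	static int
 *	example_add_and_lookup(struct rte_lpm *lpm)
 *	{
 *		uint32_t dst = (10u << 24) | (1u << 16) | (2u << 8) | 3u;
 *		uint32_t next_hop;
 *
 *		if (rte_lpm_add(lpm, 10u << 24, 8, 1) < 0 ||
 *				rte_lpm_add(lpm, (10u << 24) | (1u << 16), 16, 2) < 0)
 *			return -1;
 *
 *		return rte_lpm_lookup(lpm, dst, &next_hop);
 *	}
 */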
1194
1195 /*
1196  * Look for a rule in the high-level rules table
1197  */
1198 int
1199 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1200 uint8_t *next_hop)
1201 {
1202         uint32_t ip_masked;
1203         int32_t rule_index;
1204
1205         /* Check user arguments. */
1206         if ((lpm == NULL) ||
1207                 (next_hop == NULL) ||
1208                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1209                 return -EINVAL;
1210
1211         /* Look for the rule using rule_find. */
1212         ip_masked = ip & depth_to_mask(depth);
1213         rule_index = rule_find_v20(lpm, ip_masked, depth);
1214
1215         if (rule_index >= 0) {
1216                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1217                 return 1;
1218         }
1219
1220         /* If rule is not found return 0. */
1221         return 0;
1222 }
1223 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1224
1225 int
1226 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1227 uint32_t *next_hop)
1228 {
1229         uint32_t ip_masked;
1230         int32_t rule_index;
1231
1232         /* Check user arguments. */
1233         if ((lpm == NULL) ||
1234                 (next_hop == NULL) ||
1235                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1236                 return -EINVAL;
1237
1238         /* Look for the rule using rule_find. */
1239         ip_masked = ip & depth_to_mask(depth);
1240         rule_index = rule_find_v1604(lpm, ip_masked, depth);
1241
1242         if (rule_index >= 0) {
1243                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1244                 return 1;
1245         }
1246
1247         /* If rule is not found return 0. */
1248         return 0;
1249 }
1250 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1251 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1252                 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
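/*
 * Illustrative sketch (not part of the library): rte_lpm_is_rule_present()
 * answers "is this exact prefix configured?", unlike rte_lpm_lookup() which
 * performs a longest-prefix match. Values are arbitrary examples.
 *
 *	uint32_t nh;
 *	uint32_t net = (10u << 24) | (1u << 16);        10.1.0.0
 *
 *	rte_lpm_is_rule_present(lpm, net, 16, &nh);     returns 1 if 10.1.0.0/16 was added
 *	rte_lpm_is_rule_present(lpm, net, 24, &nh);     returns 0 unless a /24 was also added
 */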
1253
1254 static inline int32_t
1255 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1256                 uint8_t *sub_rule_depth)
1257 {
1258         int32_t rule_index;
1259         uint32_t ip_masked;
1260         uint8_t prev_depth;
1261
1262         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1263                 ip_masked = ip & depth_to_mask(prev_depth);
1264
1265                 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1266
1267                 if (rule_index >= 0) {
1268                         *sub_rule_depth = prev_depth;
1269                         return rule_index;
1270                 }
1271         }
1272
1273         return -1;
1274 }
1275
1276 static inline int32_t
1277 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1278                 uint8_t *sub_rule_depth)
1279 {
1280         int32_t rule_index;
1281         uint32_t ip_masked;
1282         uint8_t prev_depth;
1283
1284         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1285                 ip_masked = ip & depth_to_mask(prev_depth);
1286
1287                 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1288
1289                 if (rule_index >= 0) {
1290                         *sub_rule_depth = prev_depth;
1291                         return rule_index;
1292                 }
1293         }
1294
1295         return -1;
1296 }
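/*
 * Worked example (illustrative): if 10.1.1.0/24 is being deleted and the
 * table also holds 10.1.0.0/16 and 10.0.0.0/8, the loop above probes depths
 * 23, 22, ... and stops at 16, so the /16 becomes the replacement (sub) rule
 * used to rewrite the entries the /24 previously covered.
 */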
1297
1298 static inline int32_t
1299 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1300         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1301 {
1302         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1303
1304         /* Calculate the range and index into Table24. */
1305         tbl24_range = depth_to_range(depth);
1306         tbl24_index = (ip_masked >> 8);
1307
1308         /*
1309          * First check sub_rule_index. A value of -1 means there is no replacement
1310          * rule, while a value >= 0 is the index of the replacement rule.
1311          */
1312         if (sub_rule_index < 0) {
1313                 /*
1314                  * If no replacement rule exists then invalidate entries
1315                  * associated with this rule.
1316                  */
1317                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1318
1319                         if (lpm->tbl24[i].valid_group == 0 &&
1320                                         lpm->tbl24[i].depth <= depth) {
1321                                 lpm->tbl24[i].valid = INVALID;
1322                         } else if (lpm->tbl24[i].valid_group == 1) {
1323                                 /*
1324                                  * If TBL24 entry is extended, then there has
1325                                  * to be a rule with depth >= 25 in the
1326                                  * associated TBL8 group.
1327                                  */
1328
1329                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1330                                 tbl8_index = tbl8_group_index *
1331                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1332
1333                                 for (j = tbl8_index; j < (tbl8_index +
1334                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1335
1336                                         if (lpm->tbl8[j].depth <= depth)
1337                                                 lpm->tbl8[j].valid = INVALID;
1338                                 }
1339                         }
1340                 }
1341         } else {
1342                 /*
1343                  * If a replacement rule exists then modify entries
1344                  * associated with this rule.
1345                  */
1346
1347                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1348                         {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
1349                         .valid = VALID,
1350                         .valid_group = 0,
1351                         .depth = sub_rule_depth,
1352                 };
1353
1354                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1355                         .valid = VALID,
1356                         .valid_group = VALID,
1357                         .depth = sub_rule_depth,
1358                         .next_hop = lpm->rules_tbl
1359                         [sub_rule_index].next_hop,
1360                 };
1361
1362                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1363
1364                         if (lpm->tbl24[i].valid_group == 0 &&
1365                                         lpm->tbl24[i].depth <= depth) {
1366                                 lpm->tbl24[i] = new_tbl24_entry;
1367                         } else  if (lpm->tbl24[i].valid_group == 1) {
1368                                 /*
1369                                  * If TBL24 entry is extended, then there has
1370                                  * to be a rule with depth >= 25 in the
1371                                  * associated TBL8 group.
1372                                  */
1373
1374                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1375                                 tbl8_index = tbl8_group_index *
1376                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1377
1378                                 for (j = tbl8_index; j < (tbl8_index +
1379                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1380
1381                                         if (lpm->tbl8[j].depth <= depth)
1382                                                 lpm->tbl8[j] = new_tbl8_entry;
1383                                 }
1384                         }
1385                 }
1386         }
1387
1388         return 0;
1389 }
1390
1391 static inline int32_t
1392 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1393         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1394 {
1395 #define group_idx next_hop
1396         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1397
1398         /* Calculate the range and index into Table24. */
1399         tbl24_range = depth_to_range(depth);
1400         tbl24_index = (ip_masked >> 8);
1401
1402         /*
1403          * First check sub_rule_index. A value of -1 means there is no replacement
1404          * rule, while a value >= 0 is the index of the replacement rule.
1405          */
1406         if (sub_rule_index < 0) {
1407                 /*
1408                  * If no replacement rule exists then invalidate entries
1409                  * associated with this rule.
1410                  */
1411                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1412
1413                         if (lpm->tbl24[i].valid_group == 0 &&
1414                                         lpm->tbl24[i].depth <= depth) {
1415                                 lpm->tbl24[i].valid = INVALID;
1416                         } else if (lpm->tbl24[i].valid_group == 1) {
1417                                 /*
1418                                  * If TBL24 entry is extended, then there has
1419                                  * to be a rule with depth >= 25 in the
1420                                  * associated TBL8 group.
1421                                  */
1422
1423                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1424                                 tbl8_index = tbl8_group_index *
1425                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1426
1427                                 for (j = tbl8_index; j < (tbl8_index +
1428                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1429
1430                                         if (lpm->tbl8[j].depth <= depth)
1431                                                 lpm->tbl8[j].valid = INVALID;
1432                                 }
1433                         }
1434                 }
1435         } else {
1436                 /*
1437                  * If a replacement rule exists then modify entries
1438                  * associated with this rule.
1439                  */
1440
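                     /*
                      * valid_group == 0 marks this as a plain next-hop entry
                      * rather than an extended entry pointing into a tbl8 group.
                      */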
1441                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1442                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1443                         .valid = VALID,
1444                         .valid_group = 0,
1445                         .depth = sub_rule_depth,
1446                 };
1447
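                     /*
                      * For tbl8 entries valid_group instead flags the whole
                      * group as being in use, so it is kept set to VALID here.
                      */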
1448                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1449                         .valid = VALID,
1450                         .valid_group = VALID,
1451                         .depth = sub_rule_depth,
1452                         .next_hop = lpm->rules_tbl
1453                         [sub_rule_index].next_hop,
1454                 };
1455
1456                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1457
1458                         if (lpm->tbl24[i].valid_group == 0 &&
1459                                         lpm->tbl24[i].depth <= depth) {
1460                                 lpm->tbl24[i] = new_tbl24_entry;
1461                         } else if (lpm->tbl24[i].valid_group == 1) {
1462                                 /*
1463                                  * If TBL24 entry is extended, then there has
1464                                  * to be a rule with depth >= 25 in the
1465                                  * associated TBL8 group.
1466                                  */
1467
1468                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1469                                 tbl8_index = tbl8_group_index *
1470                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1471
1472                                 for (j = tbl8_index; j < (tbl8_index +
1473                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1474
1475                                         if (lpm->tbl8[j].depth <= depth)
1476                                                 lpm->tbl8[j] = new_tbl8_entry;
1477                                 }
1478                         }
1479                 }
1480         }
1481 #undef group_idx
1482         return 0;
1483 }
1484
1485 /*
1486  * Checks if table 8 group can be recycled.
1487  *
1488  * Return of -EEXIST means the tbl8 is in use and thus can not be recycled.
1489  * Return of -EINVAL means the tbl8 is empty and thus can be recycled.
1490  * Return of a value > -1 means the tbl8 is in use but every entry carries the
1491  * same rule, so it can be recycled (the group's first index is returned).
1492  */
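     /*
      * The delete_depth_big_* callers below act on this result by freeing the
      * group (-EINVAL), collapsing it back into its tbl24 entry (index
      * returned), or leaving it untouched (-EEXIST).
      */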
1493 static inline int32_t
1494 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1495                 uint32_t tbl8_group_start)
1496 {
1497         uint32_t tbl8_group_end, i;
1498         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1499
1500         /*
1501          * Check the first entry of the given tbl8. If it is invalid then no
1502          * rule of depth MAX_DEPTH_TBL24 or less covers this group (such a rule
1503          * would mark every entry in the tbl8 valid), so the group can not be
1504          * collapsed into a single tbl24 entry; it can only be freed if empty.
1505          */
1506         if (tbl8[tbl8_group_start].valid) {
1507                 /*
1508                  * If first entry is valid check if the depth is less than 24
1509                  * and if so check the rest of the entries to verify that they
1510                  * are all of this depth.
1511                  */
1512                 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1513                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1514                                         i++) {
1515
1516                                 if (tbl8[i].depth !=
1517                                                 tbl8[tbl8_group_start].depth) {
1518
1519                                         return -EEXIST;
1520                                 }
1521                         }
1522                         /* If all entries are the same return the tbl8 index */
1523                         return tbl8_group_start;
1524                 }
1525
1526                 return -EEXIST;
1527         }
1528         /*
1529          * If the first entry is invalid check if the rest of the entries in
1530          * the tbl8 are invalid.
1531          */
1532         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1533                 if (tbl8[i].valid)
1534                         return -EEXIST;
1535         }
1536         /* If no valid entries are found then return -EINVAL. */
1537         return -EINVAL;
1538 }
1539
1540 static inline int32_t
1541 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1542                 uint32_t tbl8_group_start)
1543 {
1544         uint32_t tbl8_group_end, i;
1545         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1546
1547         /*
1548          * Check the first entry of the given tbl8. If it is invalid then no
1549          * rule of depth MAX_DEPTH_TBL24 or less covers this group (such a rule
1550          * would mark every entry in the tbl8 valid), so the group can not be
1551          * collapsed into a single tbl24 entry; it can only be freed if empty.
1552          */
1553         if (tbl8[tbl8_group_start].valid) {
1554                 /*
1555                  * If first entry is valid check if the depth is less than 24
1556                  * and if so check the rest of the entries to verify that they
1557                  * are all of this depth.
1558                  */
1559                 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1560                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1561                                         i++) {
1562
1563                                 if (tbl8[i].depth !=
1564                                                 tbl8[tbl8_group_start].depth) {
1565
1566                                         return -EEXIST;
1567                                 }
1568                         }
1569                         /* If all entries are the same return the tbl8 index */
1570                         return tbl8_group_start;
1571                 }
1572
1573                 return -EEXIST;
1574         }
1575         /*
1576          * If the first entry is invalid check if the rest of the entries in
1577          * the tbl8 are invalid.
1578          */
1579         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1580                 if (tbl8[i].valid)
1581                         return -EEXIST;
1582         }
1583         /* If no valid entries are found then return -EINVAL. */
1584         return -EINVAL;
1585 }
1586
1587 static inline int32_t
1588 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1589         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1590 {
1591         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1592                         tbl8_range, i;
1593         int32_t tbl8_recycle_index;
1594
1595         /*
1596          * Calculate the index into tbl24 and range. Note: All depths larger
1597          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1598          */
1599         tbl24_index = ip_masked >> 8;
1600
1601         /* Calculate the index into tbl8 and range. */
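             /*
              * The group index stored in the tbl24 entry selects the tbl8 group;
              * the low 8 bits of the masked address pick the first entry to touch.
              */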
1602         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1603         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1604         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1605         tbl8_range = depth_to_range(depth);
1606
1607         if (sub_rule_index < 0) {
1608                 /*
1609                  * Loop through the range of entries on tbl8 for which the
1610                  * rule_to_delete must be removed or modified.
1611                  */
1612                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1613                         if (lpm->tbl8[i].depth <= depth)
1614                                 lpm->tbl8[i].valid = INVALID;
1615                 }
1616         } else {
1617                 /* Set new tbl8 entry. */
1618                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1619                         .valid = VALID,
1620                         .depth = sub_rule_depth,
1621                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1622                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1623                 };
1624
1625                 /*
1626                  * Loop through the range of entries on tbl8 for which the
1627                  * rule_to_delete must be modified.
1628                  */
1629                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1630                         if (lpm->tbl8[i].depth <= depth)
1631                                 lpm->tbl8[i] = new_tbl8_entry;
1632                 }
1633         }
1634
1635         /*
1636          * Check whether any valid entries remain in this tbl8 group. If none
1637          * do, the tbl8 is freed and the tbl24 entry invalidated; if all entries
1638          * carry one rule, the group is collapsed back into a single tbl24 entry.
1639          */
1640
1641         tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1642
1643         if (tbl8_recycle_index == -EINVAL) {
1644                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1645                 lpm->tbl24[tbl24_index].valid = 0;
1646                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1647         } else if (tbl8_recycle_index > -1) {
1648                 /* Update tbl24 entry. */
1649                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1650                         { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
1651                         .valid = VALID,
1652                         .valid_group = 0,
1653                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1654                 };
1655
1656                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1657                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1658                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1659         }
1660
1661         return 0;
1662 }
1663
1664 static inline int32_t
1665 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1666         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1667 {
1668 #define group_idx next_hop
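     /* As above, next_hop doubles as the tbl8 group index in this layout. */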
1669         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1670                         tbl8_range, i;
1671         int32_t tbl8_recycle_index;
1672
1673         /*
1674          * Calculate the index into tbl24 and range. Note: All depths larger
1675          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1676          */
1677         tbl24_index = ip_masked >> 8;
1678
1679         /* Calculate the index into tbl8 and range. */
1680         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1681         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1682         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1683         tbl8_range = depth_to_range(depth);
1684
1685         if (sub_rule_index < 0) {
1686                 /*
1687                  * Loop through the range of entries on tbl8 for which the
1688                  * rule_to_delete must be removed or modified.
1689                  */
1690                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1691                         if (lpm->tbl8[i].depth <= depth)
1692                                 lpm->tbl8[i].valid = INVALID;
1693                 }
1694         } else {
1695                 /* Set new tbl8 entry. */
1696                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1697                         .valid = VALID,
1698                         .depth = sub_rule_depth,
1699                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1700                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1701                 };
1702
1703                 /*
1704                  * Loop through the range of entries on tbl8 for which the
1705                  * rule_to_delete must be modified.
1706                  */
1707                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1708                         if (lpm->tbl8[i].depth <= depth)
1709                                 lpm->tbl8[i] = new_tbl8_entry;
1710                 }
1711         }
1712
1713         /*
1714          * Check whether any valid entries remain in this tbl8 group. If none
1715          * do, the tbl8 is freed and the tbl24 entry invalidated; if all entries
1716          * carry one rule, the group is collapsed back into a single tbl24 entry.
1717          */
1718
1719         tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1720
1721         if (tbl8_recycle_index == -EINVAL) {
1722                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1723                 lpm->tbl24[tbl24_index].valid = 0;
1724                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1725         } else if (tbl8_recycle_index > -1) {
1726                 /* Update tbl24 entry. */
1727                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1728                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1729                         .valid = VALID,
1730                         .valid_group = 0,
1731                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1732                 };
1733
1734                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1735                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1736                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1737         }
1738 #undef group_idx
1739         return 0;
1740 }
1741
1742 /*
1743  * Deletes a rule
1744  */
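     /*
      * Illustrative usage sketch (not part of the library): assuming an LPM
      * table "lpm" that already holds 10.0.0.0/24, the rule can be removed
      * again with the address given in host byte order:
      *
      *     uint32_t ip = (10 << 24) | (0 << 16) | (0 << 8) | 0;
      *     if (rte_lpm_delete(lpm, ip, 24) < 0)
      *             printf("rule not found or invalid arguments\n");
      */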
1745 int
1746 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1747 {
1748         int32_t rule_to_delete_index, sub_rule_index;
1749         uint32_t ip_masked;
1750         uint8_t sub_rule_depth;
1751         /*
1752          * Check input arguments. Note: the IP address is an unsigned 32-bit
1753          * integer, so any value is valid and it does not need to be checked.
1754          */
1755         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1756                 return -EINVAL;
1757         }
1758
1759         ip_masked = ip & depth_to_mask(depth);
1760
1761         /*
1762          * Find the index of the input rule, that needs to be deleted, in the
1763          * Find the index of the input rule to be deleted in the
1764          */
1765         rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1766
1767         /*
1768          * Check if rule_to_delete_index was found. If no rule was found the
1769          * function rule_find returns -EINVAL.
1770          */
1771         if (rule_to_delete_index < 0)
1772                 return -EINVAL;
1773
1774         /* Delete the rule from the rule table. */
1775         rule_delete_v20(lpm, rule_to_delete_index, depth);
1776
1777         /*
1778          * Find rule to replace the rule_to_delete. If there is no rule to
1779          * replace the rule_to_delete we return -1 and invalidate the table
1780          * entries associated with this rule.
1781          */
1782         sub_rule_depth = 0;
1783         sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1784
1785         /*
1786          * If the input depth value is less than 25 use function
1787          * delete_depth_small otherwise use delete_depth_big.
1788          */
1789         if (depth <= MAX_DEPTH_TBL24) {
1790                 return delete_depth_small_v20(lpm, ip_masked, depth,
1791                                 sub_rule_index, sub_rule_depth);
1792         } else { /* If depth > MAX_DEPTH_TBL24 */
1793                 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1794                                 sub_rule_depth);
1795         }
1796 }
1797 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1798
1799 int
1800 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1801 {
1802         int32_t rule_to_delete_index, sub_rule_index;
1803         uint32_t ip_masked;
1804         uint8_t sub_rule_depth;
1805         /*
1806          * Check input arguments. Note: the IP address is an unsigned 32-bit
1807          * integer, so any value is valid and it does not need to be checked.
1808          */
1809         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1810                 return -EINVAL;
1811         }
1812
1813         ip_masked = ip & depth_to_mask(depth);
1814
1815         /*
1816          * Find the index of the input rule, that needs to be deleted, in the
1817          * Find the index of the input rule to be deleted in the
1818          */
1819         rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1820
1821         /*
1822          * Check if rule_to_delete_index was found. If no rule was found the
1823          * function rule_find returns -EINVAL.
1824          */
1825         if (rule_to_delete_index < 0)
1826                 return -EINVAL;
1827
1828         /* Delete the rule from the rule table. */
1829         rule_delete_v1604(lpm, rule_to_delete_index, depth);
1830
1831         /*
1832          * Find rule to replace the rule_to_delete. If there is no rule to
1833          * replace the rule_to_delete we return -1 and invalidate the table
1834          * entries associated with this rule.
1835          */
1836         sub_rule_depth = 0;
1837         sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1838
1839         /*
1840          * If the input depth value is less than 25 use function
1841          * delete_depth_small otherwise use delete_depth_big.
1842          */
1843         if (depth <= MAX_DEPTH_TBL24) {
1844                 return delete_depth_small_v1604(lpm, ip_masked, depth,
1845                                 sub_rule_index, sub_rule_depth);
1846         } else { /* If depth > MAX_DEPTH_TBL24 */
1847                 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1848                                 sub_rule_depth);
1849         }
1850 }
1851 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1852 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1853                 uint8_t depth), rte_lpm_delete_v1604);
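     /*
      * The _v20 variant above is kept only for binary compatibility with the
      * old DPDK 2.0 ABI; BIND_DEFAULT_SYMBOL and MAP_STATIC_SYMBOL make the
      * _v1604 variant the one seen by newly built and statically linked
      * applications.
      */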
1854
1855 /*
1856  * Delete all rules from the LPM table.
1857  */
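     /*
      * Note that the tables are simply zeroed in place; no memory is released
      * here (that only happens in rte_lpm_free()).
      */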
1858 void
1859 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1860 {
1861         /* Zero rule information. */
1862         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1863
1864         /* Zero tbl24. */
1865         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1866
1867         /* Zero tbl8. */
1868         memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1869
1870         /* Delete all rules from the rules table. */
1871         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1872 }
1873 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1874
1875 void
1876 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1877 {
1878         /* Zero rule information. */
1879         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1880
1881         /* Zero tbl24. */
1882         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1883
1884         /* Zero tbl8. */
1885         memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1886
1887         /* Delete all rules from the rules table. */
1888         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1889 }
1890 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1891 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1892                 rte_lpm_delete_all_v1604);