lib/librte_lpm/rte_lpm.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
        .name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
        INVALID = 0,
        VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
        if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
                rte_panic("LPM: Invalid depth (%u) at line %d", \
                                (unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)          : range = 1 - 32
 * mask   (OUT)         : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
        VERIFY_DEPTH(depth);

        /* To calculate a mask, start with a 1 on the left hand side and right
         * shift while populating the left hand side with 1's
         */
        return (int)0x80000000 >> (depth - 1);
}
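/*
 * For example, assuming the usual arithmetic right shift of a negative
 * value: depth 8 yields 0xFF000000, depth 24 yields 0xFFFFFF00 and
 * depth 32 yields 0xFFFFFFFF.
 */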

/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
        VERIFY_DEPTH(depth);

        /*
         * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
         */
        if (depth <= MAX_DEPTH_TBL24)
                return 1 << (MAX_DEPTH_TBL24 - depth);

        /* Else if depth is greater than 24 */
        return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
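/*
 * For example, a /16 prefix covers 1 << (24 - 16) = 256 tbl24 entries,
 * while a /30 prefix covers 1 << (32 - 30) = 4 tbl8 entries.
 */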

/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
        struct rte_lpm_v20 *l = NULL;
        struct rte_tailq_entry *te;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, lpm_list, next) {
                l = (struct rte_lpm_v20 *) te->data;
                if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);

struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
        struct rte_lpm *l = NULL;
        struct rte_tailq_entry *te;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, lpm_list, next) {
                l = (struct rte_lpm *) te->data;
                if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
                rte_lpm_find_existing_v1604);

/*
 * Allocates memory for LPM object
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
                __rte_unused int flags)
{
        char mem_name[RTE_LPM_NAMESIZE];
        struct rte_lpm_v20 *lpm = NULL;
        struct rte_tailq_entry *te;
        uint32_t mem_size;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

        /* Check user arguments. */
        if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

        /* Determine the amount of memory to allocate. */
        mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* guarantee there's no existing entry with the same name */
        TAILQ_FOREACH(te, lpm_list, next) {
                lpm = (struct rte_lpm_v20 *) te->data;
                if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        lpm = NULL;
        if (te != NULL) {
                rte_errno = EEXIST;
                goto exit;
        }

        /* allocate tailq entry */
        te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
                goto exit;
        }

        /* Allocate memory to store the LPM data structures. */
        lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (lpm == NULL) {
                RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
                rte_free(te);
                goto exit;
        }

        /* Save user arguments. */
        lpm->max_rules = max_rules;
        snprintf(lpm->name, sizeof(lpm->name), "%s", name);

        te->data = (void *) lpm;

        TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);

struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
                const struct rte_lpm_config *config)
{
        char mem_name[RTE_LPM_NAMESIZE];
        struct rte_lpm *lpm = NULL;
        struct rte_tailq_entry *te;
        uint32_t mem_size, rules_size, tbl8s_size;
        struct rte_lpm_list *lpm_list;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

        /* Check user arguments. */
        if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
                        || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

        /* Determine the amount of memory to allocate. */
        mem_size = sizeof(*lpm);
        rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
        tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* guarantee there's no existing entry with the same name */
        TAILQ_FOREACH(te, lpm_list, next) {
                lpm = (struct rte_lpm *) te->data;
                if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
        lpm = NULL;
        if (te != NULL) {
                rte_errno = EEXIST;
                goto exit;
        }

        /* allocate tailq entry */
        te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
                goto exit;
        }

        /* Allocate memory to store the LPM data structures. */
        lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (lpm == NULL) {
                RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
                rte_free(te);
                goto exit;
        }

        lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
                        (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

        if (lpm->rules_tbl == NULL) {
                RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
                rte_free(lpm);
                lpm = NULL;
                rte_free(te);
                goto exit;
        }

        lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
                        (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

        if (lpm->tbl8 == NULL) {
                RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
                rte_free(lpm->rules_tbl);
                rte_free(lpm);
                lpm = NULL;
                rte_free(te);
                goto exit;
        }

        /* Save user arguments. */
        lpm->max_rules = config->max_rules;
        lpm->number_tbl8s = config->number_tbl8s;
        snprintf(lpm->name, sizeof(lpm->name), "%s", name);

        te->data = (void *) lpm;

        TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
        struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
                        const struct rte_lpm_config *config), rte_lpm_create_v1604);
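/*
 * Minimal creation sketch (field values are illustrative only):
 *
 *      struct rte_lpm_config config = {
 *              .max_rules = 1024,
 *              .number_tbl8s = 256,
 *              .flags = 0,
 *      };
 *      struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *                      &config);
 *      if (lpm == NULL)
 *              RTE_LOG(ERR, LPM, "create failed: %d\n", rte_errno);
 */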

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
        struct rte_lpm_list *lpm_list;
        struct rte_tailq_entry *te;

        /* Check user arguments. */
        if (lpm == NULL)
                return;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find our tailq entry */
        TAILQ_FOREACH(te, lpm_list, next) {
                if (te->data == (void *) lpm)
                        break;
        }
        if (te != NULL)
                TAILQ_REMOVE(lpm_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_free(lpm);
        rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);

void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
        struct rte_lpm_list *lpm_list;
        struct rte_tailq_entry *te;

        /* Check user arguments. */
        if (lpm == NULL)
                return;

        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find our tailq entry */
        TAILQ_FOREACH(te, lpm_list, next) {
                if (te->data == (void *) lpm)
                        break;
        }
        if (te != NULL)
                TAILQ_REMOVE(lpm_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_free(lpm->tbl8);
        rte_free(lpm->rules_tbl);
        rte_free(lpm);
        rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
                rte_lpm_free_v1604);

/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to index the group for a given depth because, even though the depth range
 * is 1 - 32, group indexes in the rule table run from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
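/*
 * For example, a rule for 10.1.0.0/16 lives in group 15 (= depth 16 - 1),
 * which rule_add_v1604() places directly after all existing rules of
 * depth 1 .. 15.
 */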
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
        uint8_t next_hop)
{
        uint32_t rule_gindex, rule_index, last_rule;
        int i;

        VERIFY_DEPTH(depth);

        /* Scan through rule group to see if rule already exists. */
        if (lpm->rule_info[depth - 1].used_rules > 0) {

                /* rule_gindex stands for rule group index. */
                rule_gindex = lpm->rule_info[depth - 1].first_rule;
                /* Initialise rule_index to point to start of rule group. */
                rule_index = rule_gindex;
                /* Last rule = Last used rule in this rule group. */
                last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

                for (; rule_index < last_rule; rule_index++) {

                        /* If rule already exists update its next_hop and return. */
                        if (lpm->rules_tbl[rule_index].ip == ip_masked) {
                                lpm->rules_tbl[rule_index].next_hop = next_hop;

                                return rule_index;
                        }
                }

                if (rule_index == lpm->max_rules)
                        return -ENOSPC;
        } else {
                /* Calculate the position in which the rule will be stored. */
                rule_index = 0;

                for (i = depth - 1; i > 0; i--) {
                        if (lpm->rule_info[i - 1].used_rules > 0) {
                                rule_index = lpm->rule_info[i - 1].first_rule
                                                + lpm->rule_info[i - 1].used_rules;
                                break;
                        }
                }
                if (rule_index == lpm->max_rules)
                        return -ENOSPC;

                lpm->rule_info[depth - 1].first_rule = rule_index;
        }

        /* Make room for the new rule in the array. */
        for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
                if (lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
                        return -ENOSPC;

                if (lpm->rule_info[i - 1].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules]
                                        = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
                        lpm->rule_info[i - 1].first_rule++;
                }
        }

        /* Add the new rule. */
        lpm->rules_tbl[rule_index].ip = ip_masked;
        lpm->rules_tbl[rule_index].next_hop = next_hop;

        /* Increment the used rules counter for this rule group. */
        lpm->rule_info[depth - 1].used_rules++;

        return rule_index;
}

static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        uint32_t next_hop)
{
        uint32_t rule_gindex, rule_index, last_rule;
        int i;

        VERIFY_DEPTH(depth);

        /* Scan through rule group to see if rule already exists. */
        if (lpm->rule_info[depth - 1].used_rules > 0) {

                /* rule_gindex stands for rule group index. */
                rule_gindex = lpm->rule_info[depth - 1].first_rule;
                /* Initialise rule_index to point to start of rule group. */
                rule_index = rule_gindex;
                /* Last rule = Last used rule in this rule group. */
                last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

                for (; rule_index < last_rule; rule_index++) {

                        /* If rule already exists update its next_hop and return. */
                        if (lpm->rules_tbl[rule_index].ip == ip_masked) {
                                lpm->rules_tbl[rule_index].next_hop = next_hop;

                                return rule_index;
                        }
                }

                if (rule_index == lpm->max_rules)
                        return -ENOSPC;
        } else {
                /* Calculate the position in which the rule will be stored. */
                rule_index = 0;

                for (i = depth - 1; i > 0; i--) {
                        if (lpm->rule_info[i - 1].used_rules > 0) {
                                rule_index = lpm->rule_info[i - 1].first_rule
                                                + lpm->rule_info[i - 1].used_rules;
                                break;
                        }
                }
                if (rule_index == lpm->max_rules)
                        return -ENOSPC;

                lpm->rule_info[depth - 1].first_rule = rule_index;
        }

        /* Make room for the new rule in the array. */
        for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
                if (lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
                        return -ENOSPC;

                if (lpm->rule_info[i - 1].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
                                + lpm->rule_info[i - 1].used_rules]
                                        = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
                        lpm->rule_info[i - 1].first_rule++;
                }
        }

        /* Add the new rule. */
        lpm->rules_tbl[rule_index].ip = ip_masked;
        lpm->rules_tbl[rule_index].next_hop = next_hop;

        /* Increment the used rules counter for this rule group. */
        lpm->rule_info[depth - 1].used_rules++;

        return rule_index;
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
        int i;

        VERIFY_DEPTH(depth);

        lpm->rules_tbl[rule_index] =
                        lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
                                + lpm->rule_info[depth - 1].used_rules - 1];

        for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
                if (lpm->rule_info[i].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
                                lpm->rules_tbl[lpm->rule_info[i].first_rule
                                        + lpm->rule_info[i].used_rules - 1];
                        lpm->rule_info[i].first_rule--;
                }
        }

        lpm->rule_info[depth - 1].used_rules--;
}

static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
        int i;

        VERIFY_DEPTH(depth);

        lpm->rules_tbl[rule_index] =
                        lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
                        + lpm->rule_info[depth - 1].used_rules - 1];

        for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
                if (lpm->rule_info[i].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
                                        lpm->rules_tbl[lpm->rule_info[i].first_rule
                                                + lpm->rule_info[i].used_rules - 1];
                        lpm->rule_info[i].first_rule--;
                }
        }

        lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
        uint32_t rule_gindex, last_rule, rule_index;

        VERIFY_DEPTH(depth);

        rule_gindex = lpm->rule_info[depth - 1].first_rule;
        last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

        /* Scan used rules at given depth to find rule. */
        for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
                /* If rule is found return the rule index. */
                if (lpm->rules_tbl[rule_index].ip == ip_masked)
                        return rule_index;
        }

        /* If rule is not found return -EINVAL. */
        return -EINVAL;
}

static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
        uint32_t rule_gindex, last_rule, rule_index;

        VERIFY_DEPTH(depth);

        rule_gindex = lpm->rule_info[depth - 1].first_rule;
        last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

        /* Scan used rules at given depth to find rule. */
        for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
                /* If rule is found return the rule index. */
                if (lpm->rules_tbl[rule_index].ip == ip_masked)
                        return rule_index;
        }

        /* If rule is not found return -EINVAL. */
        return -EINVAL;
}

/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
        uint32_t group_idx; /* tbl8 group index. */
        struct rte_lpm_tbl_entry_v20 *tbl8_entry;

        /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
        for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
                        group_idx++) {
                tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
                /* If a free tbl8 group is found clean it and set as VALID. */
                if (!tbl8_entry->valid_group) {
                        memset(&tbl8_entry[0], 0,
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                                        sizeof(tbl8_entry[0]));

                        tbl8_entry->valid_group = VALID;

                        /* Return group index for allocated tbl8 group. */
                        return group_idx;
                }
        }

        /* If there are no tbl8 groups free then return error. */
        return -ENOSPC;
}

static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
        uint32_t group_idx; /* tbl8 group index. */
        struct rte_lpm_tbl_entry *tbl8_entry;

        /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
        for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
                tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
                /* If a free tbl8 group is found clean it and set as VALID. */
                if (!tbl8_entry->valid_group) {
                        memset(&tbl8_entry[0], 0,
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                                        sizeof(tbl8_entry[0]));

                        tbl8_entry->valid_group = VALID;

                        /* Return group index for allocated tbl8 group. */
                        return group_idx;
                }
        }

        /* If there are no tbl8 groups free then return error. */
        return -ENOSPC;
}
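/*
 * Each tbl8 group spans RTE_LPM_TBL8_GROUP_NUM_ENTRIES (256) consecutive
 * entries, so group g occupies indexes g * 256 .. g * 256 + 255. The
 * allocator above tests and sets only the first entry's valid_group flag
 * to mark the whole group as allocated.
 */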

static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
        /* Set tbl8 group invalid */
        tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
        /* Set tbl8 group invalid */
        tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t next_hop)
{
        uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

        /* Calculate the index into Table24. */
        tbl24_index = ip >> 8;
        tbl24_range = depth_to_range(depth);

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
                /*
                 * For invalid entries, or valid non-extended entries, set
                 * the tbl24 entry.
                 */
                if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                                lpm->tbl24[i].depth <= depth)) {

                        struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                                .valid = VALID,
                                .valid_group = 0,
                                .depth = depth,
                        };
                        new_tbl24_entry.next_hop = next_hop;

                        /* Setting tbl24 entry in one go to avoid race
                         * conditions
                         */
                        lpm->tbl24[i] = new_tbl24_entry;

                        continue;
                }

                if (lpm->tbl24[i].valid_group == 1) {
                        /* If tbl24 entry is valid and extended, calculate
                         * the index into tbl8.
                         */
                        tbl8_index = lpm->tbl24[i].group_idx *
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                        tbl8_group_end = tbl8_index +
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                        for (j = tbl8_index; j < tbl8_group_end; j++) {
                                if (!lpm->tbl8[j].valid ||
                                                lpm->tbl8[j].depth <= depth) {
                                        struct rte_lpm_tbl_entry_v20
                                                new_tbl8_entry = {
                                                .valid = VALID,
                                                .valid_group = VALID,
                                                .depth = depth,
                                        };
                                        new_tbl8_entry.next_hop = next_hop;

                                        /*
                                         * Setting tbl8 entry in one go to avoid
                                         * race conditions
                                         */
                                        lpm->tbl8[j] = new_tbl8_entry;

                                        continue;
                                }
                        }
                }
        }

        return 0;
}

static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                uint32_t next_hop)
{
#define group_idx next_hop /* the next_hop field doubles as the tbl8 group index */
        uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

        /* Calculate the index into Table24. */
        tbl24_index = ip >> 8;
        tbl24_range = depth_to_range(depth);

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
                /*
                 * For invalid entries, or valid non-extended entries, set
                 * the tbl24 entry.
                 */
                if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                                lpm->tbl24[i].depth <= depth)) {

                        struct rte_lpm_tbl_entry new_tbl24_entry = {
                                .next_hop = next_hop,
                                .valid = VALID,
                                .valid_group = 0,
                                .depth = depth,
                        };

                        /* Setting tbl24 entry in one go to avoid race
                         * conditions
                         */
                        lpm->tbl24[i] = new_tbl24_entry;

                        continue;
                }

                if (lpm->tbl24[i].valid_group == 1) {
                        /* If tbl24 entry is valid and extended, calculate
                         * the index into tbl8.
                         */
                        tbl8_index = lpm->tbl24[i].group_idx *
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                        tbl8_group_end = tbl8_index +
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                        for (j = tbl8_index; j < tbl8_group_end; j++) {
                                if (!lpm->tbl8[j].valid ||
                                                lpm->tbl8[j].depth <= depth) {
                                        struct rte_lpm_tbl_entry
                                                new_tbl8_entry = {
                                                .valid = VALID,
                                                .valid_group = VALID,
                                                .depth = depth,
                                                .next_hop = next_hop,
                                        };

                                        /*
                                         * Setting tbl8 entry in one go to avoid
                                         * race conditions
                                         */
                                        lpm->tbl8[j] = new_tbl8_entry;

                                        continue;
                                }
                        }
                }
        }
#undef group_idx
        return 0;
}

static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
                uint8_t next_hop)
{
        uint32_t tbl24_index;
        int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
                tbl8_range, i;

        tbl24_index = (ip_masked >> 8);
        tbl8_range = depth_to_range(depth);

        if (!lpm->tbl24[tbl24_index].valid) {
                /* Search for a free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

                /* Check tbl8 allocation was successful. */
                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                /* Find index into tbl8 and range. */
                tbl8_index = (tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                                (ip_masked & 0xFF);

                /* Set tbl8 entry. */
                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                        lpm->tbl8[i].valid = VALID;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go
                 */

                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                        { .group_idx = (uint8_t)tbl8_group_index, },
                        .valid = VALID,
                        .valid_group = 1,
                        .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } /* If valid entry but not extended, calculate the index into Table8. */
        else if (lpm->tbl24[tbl24_index].valid_group == 0) {
                /* Search for free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_group_end = tbl8_group_start +
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                /* Populate new tbl8 with tbl24 value. */
                for (i = tbl8_group_start; i < tbl8_group_end; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
                        lpm->tbl8[i].next_hop =
                                        lpm->tbl24[tbl24_index].next_hop;
                }

                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                /* Insert new rule into the tbl8 entry. */
                for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go.
                 */

                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                                { .group_idx = (uint8_t)tbl8_group_index, },
                                .valid = VALID,
                                .valid_group = 1,
                                .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } else { /*
                * If it is a valid, extended entry, calculate the index into tbl8.
                */
                tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

                        if (!lpm->tbl8[i].valid ||
                                        lpm->tbl8[i].depth <= depth) {
                                struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
                                        .valid = VALID,
                                        .depth = depth,
                                        .valid_group = lpm->tbl8[i].valid_group,
                                };
                                new_tbl8_entry.next_hop = next_hop;
                                /*
                                 * Setting tbl8 entry in one go to avoid race
                                 * condition
                                 */
                                lpm->tbl8[i] = new_tbl8_entry;

                                continue;
                        }
                }
        }

        return 0;
}

static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                uint32_t next_hop)
{
#define group_idx next_hop /* the next_hop field doubles as the tbl8 group index */
        uint32_t tbl24_index;
        int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
                tbl8_range, i;

        tbl24_index = (ip_masked >> 8);
        tbl8_range = depth_to_range(depth);

        if (!lpm->tbl24[tbl24_index].valid) {
                /* Search for a free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

                /* Check tbl8 allocation was successful. */
                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                /* Find index into tbl8 and range. */
                tbl8_index = (tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                                (ip_masked & 0xFF);

                /* Set tbl8 entry. */
                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                        lpm->tbl8[i].valid = VALID;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go
                 */

                struct rte_lpm_tbl_entry new_tbl24_entry = {
                        .group_idx = (uint8_t)tbl8_group_index,
                        .valid = VALID,
                        .valid_group = 1,
                        .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } /* If valid entry but not extended, calculate the index into Table8. */
        else if (lpm->tbl24[tbl24_index].valid_group == 0) {
                /* Search for free tbl8 group. */
                tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

                if (tbl8_group_index < 0) {
                        return tbl8_group_index;
                }

                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_group_end = tbl8_group_start +
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                /* Populate new tbl8 with tbl24 value. */
                for (i = tbl8_group_start; i < tbl8_group_end; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
                        lpm->tbl8[i].next_hop =
                                        lpm->tbl24[tbl24_index].next_hop;
                }

                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                /* Insert new rule into the tbl8 entry. */
                for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = depth;
                        lpm->tbl8[i].next_hop = next_hop;
                }

                /*
                 * Update tbl24 entry to point to new tbl8 entry. Note: The
                 * ext_flag and tbl8_index need to be updated simultaneously,
                 * so assign whole structure in one go.
                 */

                struct rte_lpm_tbl_entry new_tbl24_entry = {
                                .group_idx = (uint8_t)tbl8_group_index,
                                .valid = VALID,
                                .valid_group = 1,
                                .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

        } else { /*
                * If it is a valid, extended entry, calculate the index into tbl8.
                */
                tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
                tbl8_group_start = tbl8_group_index *
                                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

                        if (!lpm->tbl8[i].valid ||
                                        lpm->tbl8[i].depth <= depth) {
                                struct rte_lpm_tbl_entry new_tbl8_entry = {
                                        .valid = VALID,
                                        .depth = depth,
                                        .next_hop = next_hop,
                                        .valid_group = lpm->tbl8[i].valid_group,
                                };

                                /*
                                 * Setting tbl8 entry in one go to avoid race
                                 * condition
                                 */
                                lpm->tbl8[i] = new_tbl8_entry;

                                continue;
                        }
                }
        }
#undef group_idx
        return 0;
}

/*
 * Add a route
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t next_hop)
{
        int32_t rule_index, status = 0;
        uint32_t ip_masked;

        /* Check user arguments. */
        if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        ip_masked = ip & depth_to_mask(depth);

        /* Add the rule to the rule table. */
        rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

        /* If there is no space available for the new rule return error. */
        if (rule_index < 0) {
                return rule_index;
        }

        if (depth <= MAX_DEPTH_TBL24) {
                status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
        } else { /* If depth > MAX_DEPTH_TBL24 */
                status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

                /*
                 * If add fails due to exhaustion of tbl8 extensions delete
                 * rule that was added to rule table.
                 */
                if (status < 0) {
                        rule_delete_v20(lpm, rule_index, depth);

                        return status;
                }
        }

        return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);

int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                uint32_t next_hop)
{
        int32_t rule_index, status = 0;
        uint32_t ip_masked;

        /* Check user arguments. */
        if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        ip_masked = ip & depth_to_mask(depth);

        /* Add the rule to the rule table. */
        rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

        /* If there is no space available for the new rule return error. */
        if (rule_index < 0) {
                return rule_index;
        }

        if (depth <= MAX_DEPTH_TBL24) {
                status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
        } else { /* If depth > MAX_DEPTH_TBL24 */
                status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

                /*
                 * If add fails due to exhaustion of tbl8 extensions delete
                 * rule that was added to rule table.
                 */
                if (status < 0) {
                        rule_delete_v1604(lpm, rule_index, depth);

                        return status;
                }
        }

        return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
                uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
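/*
 * Minimal usage sketch (prefix and next-hop id are illustrative): add
 * 10.0.0.0/8 with next hop 5. Note the IP is expected in host byte order.
 *
 *      if (rte_lpm_add(lpm, ((uint32_t)10 << 24), 8, 5) < 0)
 *              RTE_LOG(ERR, LPM, "route add failed\n");
 */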

/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t *next_hop)
{
        uint32_t ip_masked;
        int32_t rule_index;

        /* Check user arguments. */
        if ((lpm == NULL) ||
                (next_hop == NULL) ||
                (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        /* Look for the rule using rule_find. */
        ip_masked = ip & depth_to_mask(depth);
        rule_index = rule_find_v20(lpm, ip_masked, depth);

        if (rule_index >= 0) {
                *next_hop = lpm->rules_tbl[rule_index].next_hop;
                return 1;
        }

        /* If rule is not found return 0. */
        return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);

int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop)
{
        uint32_t ip_masked;
        int32_t rule_index;

        /* Check user arguments. */
        if ((lpm == NULL) ||
                (next_hop == NULL) ||
                (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        /* Look for the rule using rule_find. */
        ip_masked = ip & depth_to_mask(depth);
        rule_index = rule_find_v1604(lpm, ip_masked, depth);

        if (rule_index >= 0) {
                *next_hop = lpm->rules_tbl[rule_index].next_hop;
                return 1;
        }

        /* If rule is not found return 0. */
        return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
                uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
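/*
 * Continuing the sketch above: check that the 10.0.0.0/8 rule is present
 * and retrieve its next hop.
 *
 *      uint32_t nh;
 *      if (rte_lpm_is_rule_present(lpm, ((uint32_t)10 << 24), 8, &nh) == 1)
 *              RTE_LOG(INFO, LPM, "next hop is %u\n", nh);
 */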

static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t *sub_rule_depth)
{
        int32_t rule_index;
        uint32_t ip_masked;
        uint8_t prev_depth;

        for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
                ip_masked = ip & depth_to_mask(prev_depth);

                rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

                if (rule_index >= 0) {
                        *sub_rule_depth = prev_depth;
                        return rule_index;
                }
        }

        return -1;
}

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                uint8_t *sub_rule_depth)
{
        int32_t rule_index;
        uint32_t ip_masked;
        uint8_t prev_depth;

        for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
                ip_masked = ip & depth_to_mask(prev_depth);

                rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);

                if (rule_index >= 0) {
                        *sub_rule_depth = prev_depth;
                        return rule_index;
                }
        }

        return -1;
}
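/*
 * For example, when a 10.1.0.0/16 rule is deleted, find_previous_rule()
 * walks depths 15, 14, ..., 1 and returns a covering rule such as
 * 10.0.0.0/8 (if one exists), whose next hop then replaces the deleted
 * entries in the delete_depth_* functions below.
 */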
1313
1314 static inline int32_t
1315 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1316         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1317 {
1318         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1319
1320         /* Calculate the range and index into Table24. */
1321         tbl24_range = depth_to_range(depth);
1322         tbl24_index = (ip_masked >> 8);
1323
1324         /*
1325          * Firstly check the sub_rule_index. A -1 indicates no replacement rule
1326          * and a positive number indicates a sub_rule_index.
1327          */
1328         if (sub_rule_index < 0) {
1329                 /*
1330                  * If no replacement rule exists then invalidate entries
1331                  * associated with this rule.
1332                  */
1333                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1334
1335                         if (lpm->tbl24[i].valid_group == 0 &&
1336                                         lpm->tbl24[i].depth <= depth) {
1337                                 lpm->tbl24[i].valid = INVALID;
1338                         } else if (lpm->tbl24[i].valid_group == 1) {
1339                                 /*
1340                                  * If TBL24 entry is extended, then there has
1341                                  * to be a rule with depth >= 25 in the
1342                                  * associated TBL8 group.
1343                                  */
1344
1345                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1346                                 tbl8_index = tbl8_group_index *
1347                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1348
1349                                 for (j = tbl8_index; j < (tbl8_index +
1350                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1351
1352                                         if (lpm->tbl8[j].depth <= depth)
1353                                                 lpm->tbl8[j].valid = INVALID;
1354                                 }
1355                         }
1356                 }
1357         } else {
1358                 /*
1359                  * If a replacement rule exists then modify entries
1360                  * associated with this rule.
1361                  */
1362
1363                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1364                         {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
1365                         .valid = VALID,
1366                         .valid_group = 0,
1367                         .depth = sub_rule_depth,
1368                 };
1369
1370                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1371                         .valid = VALID,
1372                         .valid_group = VALID,
1373                         .depth = sub_rule_depth,
1374                 };
1375                 new_tbl8_entry.next_hop =
1376                                 lpm->rules_tbl[sub_rule_index].next_hop;
1377
1378                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1379
1380                         if (lpm->tbl24[i].valid_group == 0 &&
1381                                         lpm->tbl24[i].depth <= depth) {
1382                                 lpm->tbl24[i] = new_tbl24_entry;
1383                         } else  if (lpm->tbl24[i].valid_group == 1) {
1384                                 /*
1385                                  * If TBL24 entry is extended, then there has
1386                                  * to be a rule with depth >= 25 in the
1387                                  * associated TBL8 group.
1388                                  */
1389
1390                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1391                                 tbl8_index = tbl8_group_index *
1392                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1393
1394                                 for (j = tbl8_index; j < (tbl8_index +
1395                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1396
1397                                         if (lpm->tbl8[j].depth <= depth)
1398                                                 lpm->tbl8[j] = new_tbl8_entry;
1399                                 }
1400                         }
1401                 }
1402         }
1403
1404         return 0;
1405 }
1406
1407 static inline int32_t
1408 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1409         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1410 {
1411 #define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * First check sub_rule_index: -1 indicates that no replacement rule
	 * exists, while a non-negative value is the index of the replacement
	 * rule.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}
#undef group_idx
	return 0;
}
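
/*
 * Worked example (illustrative only): deleting 10.0.0.0/16 with no
 * replacement rule. Here ip_masked is 0x0a000000, so tbl24_index is
 * ip_masked >> 8 = 0x0a0000 and tbl24_range = depth_to_range(16) = 256;
 * the loop above therefore walks tbl24 entries 0x0a0000..0x0a00ff,
 * invalidating every entry of depth <= 16 and descending into the tbl8
 * group of any extended entry it meets.
 */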

/*
 * Checks whether a tbl8 group can be recycled.
 *
 * A return of -EEXIST means the tbl8 is in use and thus cannot be recycled.
 * A return of -EINVAL means the tbl8 is empty and thus can be recycled.
 * A return value >= 0 means the tbl8 is in use but every entry holds the
 * same rule, so the group can be recycled (collapsed into tbl24).
 */
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24
	 * (as such a rule would cover every entry in the tbl8) and thus the
	 * group cannot be collapsed into a single tbl24 entry.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if its depth is
		 * MAX_DEPTH_TBL24 or less and, if so, check the rest of the
		 * entries to verify that they are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/*
			 * If all entries are the same return the tbl8 group
			 * start index.
			 */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24
	 * (as such a rule would cover every entry in the tbl8) and thus the
	 * group cannot be collapsed into a single tbl24 entry.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if its depth is
		 * MAX_DEPTH_TBL24 or less and, if so, check the rest of the
		 * entries to verify that they are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/*
			 * If all entries are the same return the tbl8 group
			 * start index.
			 */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
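
/*
 * Illustrative sketch (not part of the library): a caller typically acts
 * on the three possible results as follows; delete_depth_big_v1604()
 * below is the real call site. "group_start" is a hypothetical variable
 * holding the first index of the tbl8 group under test.
 *
 *	int32_t idx = tbl8_recycle_check_v1604(lpm->tbl8, group_start);
 *
 *	if (idx == -EINVAL) {
 *		// empty group: invalidate the tbl24 entry, free the group
 *	} else if (idx >= 0) {
 *		// one rule left: fold tbl8[idx] back into tbl24, free group
 *	}
 *	// otherwise (-EEXIST): the group is still in use, keep it
 */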

static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: all depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries in the tbl8 for which
		 * the rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
		};

		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;
		/*
		 * Loop through the range of entries in the tbl8 for which
		 * the rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8,
			tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}

static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: all depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries in the tbl8 for which
		 * the rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries in the tbl8 for which
		 * the rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8,
			tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}
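
/*
 * Worked example (illustrative only): deleting 10.1.1.16/28. The tbl24
 * entry at index 0x0a0101 is extended, so its group_idx selects a tbl8
 * group. tbl8_index adds the last octet (0x10) to the group start and
 * tbl8_range = depth_to_range(28) = 16, so entries group_start + 16
 * through group_start + 31 are invalidated or rewritten before the
 * recycle check decides whether the whole group can be freed.
 */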
1758
1759 /*
1760  * Deletes a rule
1761  */
1762 int
1763 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1764 {
1765         int32_t rule_to_delete_index, sub_rule_index;
1766         uint32_t ip_masked;
1767         uint8_t sub_rule_depth;
1768         /*
1769          * Check input arguments. Note: IP must be a positive integer of 32
1770          * bits in length therefore it need not be checked.
1771          */
1772         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1773                 return -EINVAL;
1774         }

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index, in the rule table, of the rule to be deleted.
	 */
	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no matching rule was
	 * found, rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v20(lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If no such rule exists,
	 * find_previous_rule returns -1 and the table entries associated
	 * with the deleted rule are simply invalidated.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v20(lpm, ip, depth,
			&sub_rule_depth);

	/*
	 * If the input depth is MAX_DEPTH_TBL24 or less use
	 * delete_depth_small, otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	}
}
VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);

int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: the IP is an unsigned 32-bit value,
	 * so every possible value is legal and it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index, in the rule table, of the rule to be deleted.
	 */
	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no matching rule was
	 * found, rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v1604(lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If no such rule exists,
	 * find_previous_rule returns -1 and the table entries associated
	 * with the deleted rule are simply invalidated.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth,
			&sub_rule_depth);

	/*
	 * If the input depth is MAX_DEPTH_TBL24 or less use
	 * delete_depth_small, otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	}
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth), rte_lpm_delete_v1604);

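/*
 * Usage sketch (assumes an LPM table created and populated elsewhere):
 * a rule is deleted with the same (ip, depth) pair that was passed to
 * rte_lpm_add(), e.g.
 *
 *	uint32_t ip = (10 << 24) | (1 << 16);	// 10.1.0.0
 *
 *	if (rte_lpm_delete(lpm, ip, 16) < 0)
 *		// -EINVAL: bad arguments or no such rule
 *
 * Afterwards, lookups under 10.1.0.0/16 fall back to the next shorter
 * matching prefix, if one exists.
 */
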
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);

void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
		rte_lpm_delete_all_v1604);
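
/*
 * Usage note (sketch): rte_lpm_delete_all() resets the table in place
 * rather than freeing it, so an existing table can be cheaply reused
 * between configuration runs:
 *
 *	rte_lpm_delete_all(lpm);	// table empty, memory retained
 *	// ... repopulate with rte_lpm_add() ...
 */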