dpdk.git: lib/librte_lpm/rte_lpm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <string.h>
35 #include <stdint.h>
36 #include <errno.h>
37 #include <stdarg.h>
38 #include <stdio.h>
39 #include <sys/queue.h>
40
41 #include <rte_log.h>
42 #include <rte_branch_prediction.h>
43 #include <rte_common.h>
44 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
45 #include <rte_malloc.h>
46 #include <rte_memzone.h>
47 #include <rte_eal.h>
48 #include <rte_eal_memconfig.h>
49 #include <rte_per_lcore.h>
50 #include <rte_string_fns.h>
51 #include <rte_errno.h>
52 #include <rte_rwlock.h>
53 #include <rte_spinlock.h>
54
55 #include "rte_lpm.h"
56
57 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
58
59 static struct rte_tailq_elem rte_lpm_tailq = {
60         .name = "RTE_LPM",
61 };
62 EAL_REGISTER_TAILQ(rte_lpm_tailq)
63
64 #define MAX_DEPTH_TBL24 24
65
66 enum valid_flag {
67         INVALID = 0,
68         VALID
69 };
70
71 /* Macro to enable/disable run-time checks. */
72 #if defined(RTE_LIBRTE_LPM_DEBUG)
73 #include <rte_debug.h>
74 #define VERIFY_DEPTH(depth) do {                                \
75         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
76                 rte_panic("LPM: Invalid depth (%u) at line %d", \
77                                 (unsigned)(depth), __LINE__);   \
78 } while (0)
79 #else
80 #define VERIFY_DEPTH(depth)
81 #endif
82
83 /*
84  * Converts a given depth value to its corresponding mask value.
85  *
86  * depth  (IN)          : range = 1 - 32
87  * mask   (OUT)         : 32bit mask
88  */
89 static uint32_t __attribute__((pure))
90 depth_to_mask(uint8_t depth)
91 {
92         VERIFY_DEPTH(depth);
93
94         /* To calculate the mask, start with a 1 in the most significant bit
95          * and arithmetic-right-shift it so the left hand side fills with 1's.
96          */
97         return (int)0x80000000 >> (depth - 1);
98 }
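
/*
 * Worked example (illustrative only, not part of the library): the
 * arithmetic shift above yields a mask with the top "depth" bits set, e.g.
 *
 *     depth_to_mask(8)  == 0xFF000000
 *     depth_to_mask(24) == 0xFFFFFF00
 *     depth_to_mask(32) == 0xFFFFFFFF
 *
 * so "ip & depth_to_mask(depth)" clears the host bits of a prefix.
 */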
99
100 /*
101  * Converts given depth value to its corresponding range value.
102  */
103 static inline uint32_t __attribute__((pure))
104 depth_to_range(uint8_t depth)
105 {
106         VERIFY_DEPTH(depth);
107
108         /*
109          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
110          */
111         if (depth <= MAX_DEPTH_TBL24)
112                 return 1 << (MAX_DEPTH_TBL24 - depth);
113
114         /* Else if depth is greater than 24 */
115         return 1 << (RTE_LPM_MAX_DEPTH - depth);
116 }
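
/*
 * Worked example (illustrative only): the range is the number of table
 * entries a prefix of the given depth spans.
 *
 *     depth_to_range(16) == 256    (256 tbl24 entries)
 *     depth_to_range(24) == 1      (a single tbl24 entry)
 *     depth_to_range(25) == 128    (128 tbl8 entries)
 *     depth_to_range(32) == 1      (a single tbl8 entry)
 */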
117
118 /*
119  * Find an existing lpm table and return a pointer to it.
120  */
121 struct rte_lpm_v20 *
122 rte_lpm_find_existing_v20(const char *name)
123 {
124         struct rte_lpm_v20 *l = NULL;
125         struct rte_tailq_entry *te;
126         struct rte_lpm_list *lpm_list;
127
128         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
129
130         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
131         TAILQ_FOREACH(te, lpm_list, next) {
132                 l = (struct rte_lpm_v20 *) te->data;
133                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
134                         break;
135         }
136         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
137
138         if (te == NULL) {
139                 rte_errno = ENOENT;
140                 return NULL;
141         }
142
143         return l;
144 }
145 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
146
147 struct rte_lpm *
148 rte_lpm_find_existing_v1604(const char *name)
149 {
150         struct rte_lpm *l = NULL;
151         struct rte_tailq_entry *te;
152         struct rte_lpm_list *lpm_list;
153
154         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
155
156         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
157         TAILQ_FOREACH(te, lpm_list, next) {
158                 l = (struct rte_lpm *) te->data;
159                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
160                         break;
161         }
162         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
163
164         if (te == NULL) {
165                 rte_errno = ENOENT;
166                 return NULL;
167         }
168
169         return l;
170 }
171 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
172 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
173                 rte_lpm_find_existing_v1604);
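
/*
 * Usage sketch (illustrative only, using a hypothetical table name
 * "lpm_example"): a process that did not create the table can attach to it
 * by name.
 *
 *     struct rte_lpm *lpm = rte_lpm_find_existing("lpm_example");
 *
 *     if (lpm == NULL && rte_errno == ENOENT)
 *             printf("no LPM table named lpm_example\n");
 */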
174
175 /*
176  * Allocates memory for LPM object
177  */
178 struct rte_lpm_v20 *
179 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
180                 __rte_unused int flags)
181 {
182         char mem_name[RTE_LPM_NAMESIZE];
183         struct rte_lpm_v20 *lpm = NULL;
184         struct rte_tailq_entry *te;
185         uint32_t mem_size;
186         struct rte_lpm_list *lpm_list;
187
188         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
189
190         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
191
192         /* Check user arguments. */
193         if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
194                 rte_errno = EINVAL;
195                 return NULL;
196         }
197
198         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
199
200         /* Determine the amount of memory to allocate. */
201         mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
202
203         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
204
205         /* Make sure no LPM table with this name already exists. */
206         TAILQ_FOREACH(te, lpm_list, next) {
207                 lpm = (struct rte_lpm_v20 *) te->data;
208                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
209                         break;
210         }
211         lpm = NULL;
212         if (te != NULL) {
213                 rte_errno = EEXIST;
214                 goto exit;
215         }
216
217         /* allocate tailq entry */
218         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
219         if (te == NULL) {
220                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
221                 rte_errno = ENOMEM;
222                 goto exit;
223         }
224
225         /* Allocate memory to store the LPM data structures. */
226         lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
227                         RTE_CACHE_LINE_SIZE, socket_id);
228         if (lpm == NULL) {
229                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
230                 rte_free(te);
231                 rte_errno = ENOMEM;
232                 goto exit;
233         }
234
235         /* Save user arguments. */
236         lpm->max_rules = max_rules;
237         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
238
239         te->data = (void *) lpm;
240
241         TAILQ_INSERT_TAIL(lpm_list, te, next);
242
243 exit:
244         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
245
246         return lpm;
247 }
248 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
249
250 struct rte_lpm *
251 rte_lpm_create_v1604(const char *name, int socket_id,
252                 const struct rte_lpm_config *config)
253 {
254         char mem_name[RTE_LPM_NAMESIZE];
255         struct rte_lpm *lpm = NULL;
256         struct rte_tailq_entry *te;
257         uint32_t mem_size, rules_size, tbl8s_size;
258         struct rte_lpm_list *lpm_list;
259
260         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
261
262         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
263
264         /* Check user arguments. */
265         if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
266                         || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
267                 rte_errno = EINVAL;
268                 return NULL;
269         }
270
271         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
272
273         /* Determine the amount of memory to allocate. */
274         mem_size = sizeof(*lpm);
275         rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
276         tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
277                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
278
279         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
280
281         /* Make sure no LPM table with this name already exists. */
282         TAILQ_FOREACH(te, lpm_list, next) {
283                 lpm = (struct rte_lpm *) te->data;
284                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
285                         break;
286         }
287         lpm = NULL;
288         if (te != NULL) {
289                 rte_errno = EEXIST;
290                 goto exit;
291         }
292
293         /* allocate tailq entry */
294         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
295         if (te == NULL) {
296                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
297                 rte_errno = ENOMEM;
298                 goto exit;
299         }
300
301         /* Allocate memory to store the LPM data structures. */
302         lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
303                         RTE_CACHE_LINE_SIZE, socket_id);
304         if (lpm == NULL) {
305                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
306                 rte_free(te);
307                 rte_errno = ENOMEM;
308                 goto exit;
309         }
310
311         lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
312                         (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
313
314         if (lpm->rules_tbl == NULL) {
315                 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
316                 rte_free(lpm);
317                 lpm = NULL;
318                 rte_free(te);
319                 rte_errno = ENOMEM;
320                 goto exit;
321         }
322
323         lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
324                         (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
325
326         if (lpm->tbl8 == NULL) {
327                 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
328                 rte_free(lpm->rules_tbl);
329                 rte_free(lpm);
330                 lpm = NULL;
331                 rte_free(te);
332                 rte_errno = ENOMEM;
333                 goto exit;
334         }
335
336         /* Save user arguments. */
337         lpm->max_rules = config->max_rules;
338         lpm->number_tbl8s = config->number_tbl8s;
339         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
340
341         te->data = (void *) lpm;
342
343         TAILQ_INSERT_TAIL(lpm_list, te, next);
344
345 exit:
346         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
347
348         return lpm;
349 }
350 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
351 MAP_STATIC_SYMBOL(
352         struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
353                         const struct rte_lpm_config *config), rte_lpm_create_v1604);
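
/*
 * Usage sketch (illustrative only, with hypothetical sizing values): create
 * a table through the public rte_lpm_create() API and release it with
 * rte_lpm_free() when it is no longer needed.
 *
 *     struct rte_lpm_config config = {
 *             .max_rules = 1024,
 *             .number_tbl8s = 256,
 *             .flags = 0,
 *     };
 *     struct rte_lpm *lpm = rte_lpm_create("lpm_example", SOCKET_ID_ANY,
 *                     &config);
 *
 *     if (lpm == NULL)
 *             printf("LPM creation failed, rte_errno %d\n", rte_errno);
 *
 *     rte_lpm_free(lpm);
 */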
354
355 /*
356  * Deallocates memory for given LPM table.
357  */
358 void
359 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
360 {
361         struct rte_lpm_list *lpm_list;
362         struct rte_tailq_entry *te;
363
364         /* Check user arguments. */
365         if (lpm == NULL)
366                 return;
367
368         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
369
370         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
371
372         /* find our tailq entry */
373         TAILQ_FOREACH(te, lpm_list, next) {
374                 if (te->data == (void *) lpm)
375                         break;
376         }
377         if (te != NULL)
378                 TAILQ_REMOVE(lpm_list, te, next);
379
380         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
381
382         rte_free(lpm);
383         rte_free(te);
384 }
385 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
386
387 void
388 rte_lpm_free_v1604(struct rte_lpm *lpm)
389 {
390         struct rte_lpm_list *lpm_list;
391         struct rte_tailq_entry *te;
392
393         /* Check user arguments. */
394         if (lpm == NULL)
395                 return;
396
397         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
398
399         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
400
401         /* find our tailq entry */
402         TAILQ_FOREACH(te, lpm_list, next) {
403                 if (te->data == (void *) lpm)
404                         break;
405         }
406         if (te != NULL)
407                 TAILQ_REMOVE(lpm_list, te, next);
408
409         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
410
411         rte_free(lpm->tbl8);
412         rte_free(lpm->rules_tbl);
413         rte_free(lpm);
414         rte_free(te);
415 }
416 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
417 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
418                 rte_lpm_free_v1604);
419
420 /*
421  * Adds a rule to the rule table.
422  *
423  * NOTE: The rule table is split into 32 groups. Each group contains rules that
424  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
425  * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
426  * as the group index because, although the depth range is 1 - 32, the groups
427  * are indexed in the rule table from 0 - 31.
428  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
429  */
430 static inline int32_t
431 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
432         uint8_t next_hop)
433 {
434         uint32_t rule_gindex, rule_index, last_rule;
435         int i;
436
437         VERIFY_DEPTH(depth);
438
439         /* Scan through rule group to see if rule already exists. */
440         if (lpm->rule_info[depth - 1].used_rules > 0) {
441
442                 /* rule_gindex stands for rule group index. */
443                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
444                 /* Initialise rule_index to point to start of rule group. */
445                 rule_index = rule_gindex;
446                 /* Last rule = Last used rule in this rule group. */
447                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
448
449                 for (; rule_index < last_rule; rule_index++) {
450
451                         /* If rule already exists update its next_hop and return. */
452                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
453                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
454
455                                 return rule_index;
456                         }
457                 }
458
459                 if (rule_index == lpm->max_rules)
460                         return -ENOSPC;
461         } else {
462                 /* Calculate the position in which the rule will be stored. */
463                 rule_index = 0;
464
465                 for (i = depth - 1; i > 0; i--) {
466                         if (lpm->rule_info[i - 1].used_rules > 0) {
467                                 rule_index = lpm->rule_info[i - 1].first_rule
468                                                 + lpm->rule_info[i - 1].used_rules;
469                                 break;
470                         }
471                 }
472                 if (rule_index == lpm->max_rules)
473                         return -ENOSPC;
474
475                 lpm->rule_info[depth - 1].first_rule = rule_index;
476         }
477
478         /* Make room for the new rule in the array. */
479         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
480                 if (lpm->rule_info[i - 1].first_rule
481                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
482                         return -ENOSPC;
483
484                 if (lpm->rule_info[i - 1].used_rules > 0) {
485                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
486                                 + lpm->rule_info[i - 1].used_rules]
487                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
488                         lpm->rule_info[i - 1].first_rule++;
489                 }
490         }
491
492         /* Add the new rule. */
493         lpm->rules_tbl[rule_index].ip = ip_masked;
494         lpm->rules_tbl[rule_index].next_hop = next_hop;
495
496         /* Increment the used rules counter for this rule group. */
497         lpm->rule_info[depth - 1].used_rules++;
498
499         return rule_index;
500 }
501
502 static inline int32_t
503 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
504         uint32_t next_hop)
505 {
506         uint32_t rule_gindex, rule_index, last_rule;
507         int i;
508
509         VERIFY_DEPTH(depth);
510
511         /* Scan through rule group to see if rule already exists. */
512         if (lpm->rule_info[depth - 1].used_rules > 0) {
513
514                 /* rule_gindex stands for rule group index. */
515                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
516                 /* Initialise rule_index to point to start of rule group. */
517                 rule_index = rule_gindex;
518                 /* Last rule = Last used rule in this rule group. */
519                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
520
521                 for (; rule_index < last_rule; rule_index++) {
522
523                         /* If rule already exists update its next_hop and return. */
524                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
525                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
526
527                                 return rule_index;
528                         }
529                 }
530
531                 if (rule_index == lpm->max_rules)
532                         return -ENOSPC;
533         } else {
534                 /* Calculate the position in which the rule will be stored. */
535                 rule_index = 0;
536
537                 for (i = depth - 1; i > 0; i--) {
538                         if (lpm->rule_info[i - 1].used_rules > 0) {
539                                 rule_index = lpm->rule_info[i - 1].first_rule
540                                                 + lpm->rule_info[i - 1].used_rules;
541                                 break;
542                         }
543                 }
544                 if (rule_index == lpm->max_rules)
545                         return -ENOSPC;
546
547                 lpm->rule_info[depth - 1].first_rule = rule_index;
548         }
549
550         /* Make room for the new rule in the array. */
551         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
552                 if (lpm->rule_info[i - 1].first_rule
553                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
554                         return -ENOSPC;
555
556                 if (lpm->rule_info[i - 1].used_rules > 0) {
557                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
558                                 + lpm->rule_info[i - 1].used_rules]
559                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
560                         lpm->rule_info[i - 1].first_rule++;
561                 }
562         }
563
564         /* Add the new rule. */
565         lpm->rules_tbl[rule_index].ip = ip_masked;
566         lpm->rules_tbl[rule_index].next_hop = next_hop;
567
568         /* Increment the used rules counter for this rule group. */
569         lpm->rule_info[depth - 1].used_rules++;
570
571         return rule_index;
572 }
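
/*
 * Layout sketch (illustrative only): with one /16 rule and two /24 rules
 * stored, the rule table and the per-depth bookkeeping look like
 *
 *     rules_tbl:     [ /16 rule ][ /24 rule ][ /24 rule ][ free ... ]
 *     rule_info[15]: first_rule = 0, used_rules = 1
 *     rule_info[23]: first_rule = 1, used_rules = 2
 *
 * Adding a second /16 rule moves the first /24 rule to the end of its group
 * (the "make room" loop above) so each depth keeps a contiguous block.
 */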
573
574 /*
575  * Delete a rule from the rule table.
576  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
577  */
578 static inline void
579 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
580 {
581         int i;
582
583         VERIFY_DEPTH(depth);
584
585         lpm->rules_tbl[rule_index] =
586                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
587                                 + lpm->rule_info[depth - 1].used_rules - 1];
588
589         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
590                 if (lpm->rule_info[i].used_rules > 0) {
591                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
592                                 lpm->rules_tbl[lpm->rule_info[i].first_rule
593                                         + lpm->rule_info[i].used_rules - 1];
594                         lpm->rule_info[i].first_rule--;
595                 }
596         }
597
598         lpm->rule_info[depth - 1].used_rules--;
599 }
600
601 static inline void
602 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
603 {
604         int i;
605
606         VERIFY_DEPTH(depth);
607
608         lpm->rules_tbl[rule_index] =
609                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
610                         + lpm->rule_info[depth - 1].used_rules - 1];
611
612         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
613                 if (lpm->rule_info[i].used_rules > 0) {
614                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
615                                         lpm->rules_tbl[lpm->rule_info[i].first_rule
616                                                 + lpm->rule_info[i].used_rules - 1];
617                         lpm->rule_info[i].first_rule--;
618                 }
619         }
620
621         lpm->rule_info[depth - 1].used_rules--;
622 }
623
624 /*
625  * Finds a rule in rule table.
626  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
627  */
628 static inline int32_t
629 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
630 {
631         uint32_t rule_gindex, last_rule, rule_index;
632
633         VERIFY_DEPTH(depth);
634
635         rule_gindex = lpm->rule_info[depth - 1].first_rule;
636         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
637
638         /* Scan used rules at given depth to find rule. */
639         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
640                 /* If rule is found return the rule index. */
641                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
642                         return rule_index;
643         }
644
645         /* If rule is not found return -EINVAL. */
646         return -EINVAL;
647 }
648
649 static inline int32_t
650 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
651 {
652         uint32_t rule_gindex, last_rule, rule_index;
653
654         VERIFY_DEPTH(depth);
655
656         rule_gindex = lpm->rule_info[depth - 1].first_rule;
657         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
658
659         /* Scan used rules at given depth to find rule. */
660         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
661                 /* If rule is found return the rule index. */
662                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
663                         return rule_index;
664         }
665
666         /* If rule is not found return -EINVAL. */
667         return -EINVAL;
668 }
669
670 /*
671  * Find, clean and allocate a tbl8.
672  */
673 static inline int32_t
674 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
675 {
676         uint32_t group_idx; /* tbl8 group index. */
677         struct rte_lpm_tbl_entry_v20 *tbl8_entry;
678
679         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
680         for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
681                         group_idx++) {
682                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
683                 /* If a free tbl8 group is found clean it and set as VALID. */
684                 if (!tbl8_entry->valid_group) {
685                         memset(&tbl8_entry[0], 0,
686                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
687                                         sizeof(tbl8_entry[0]));
688
689                         tbl8_entry->valid_group = VALID;
690
691                         /* Return group index for allocated tbl8 group. */
692                         return group_idx;
693                 }
694         }
695
696         /* If there are no tbl8 groups free then return error. */
697         return -ENOSPC;
698 }
699
700 static inline int32_t
701 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
702 {
703         uint32_t group_idx; /* tbl8 group index. */
704         struct rte_lpm_tbl_entry *tbl8_entry;
705
706         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
707         for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
708                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
709                 /* If a free tbl8 group is found clean it and set as VALID. */
710                 if (!tbl8_entry->valid_group) {
711                         memset(&tbl8_entry[0], 0,
712                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
713                                         sizeof(tbl8_entry[0]));
714
715                         tbl8_entry->valid_group = VALID;
716
717                         /* Return group index for allocated tbl8 group. */
718                         return group_idx;
719                 }
720         }
721
722         /* If there are no tbl8 groups free then return error. */
723         return -ENOSPC;
724 }
725
726 static inline void
727 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
728 {
729         /* Set tbl8 group invalid. */
730         tbl8[tbl8_group_start].valid_group = INVALID;
731 }
732
733 static inline void
734 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
735 {
737         /* Set tbl8 group invalid. */
737         tbl8[tbl8_group_start].valid_group = INVALID;
738 }
739
740 static inline int32_t
741 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
742                 uint8_t next_hop)
743 {
744         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
745
746         /* Calculate the index into Table24. */
747         tbl24_index = ip >> 8;
748         tbl24_range = depth_to_range(depth);
749
750         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
751                 /*
752                  * Set the entry if the tbl24 entry is invalid, or if it is
753                  * valid but not extended and its depth is <= the new depth.
754                  */
755                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
756                                 lpm->tbl24[i].depth <= depth)) {
757
758                         struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
759                                 .valid = VALID,
760                                 .valid_group = 0,
761                                 .depth = depth,
762                         };
763                         new_tbl24_entry.next_hop = next_hop;
764
765                         /* Setting tbl24 entry in one go to avoid race
766                          * conditions
767                          */
768                         lpm->tbl24[i] = new_tbl24_entry;
769
770                         continue;
771                 }
772
773                 if (lpm->tbl24[i].valid_group == 1) {
774                         /* If tbl24 entry is valid and extended calculate the
775                          *  index into tbl8.
776                          */
777                         tbl8_index = lpm->tbl24[i].group_idx *
778                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
779                         tbl8_group_end = tbl8_index +
780                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
781
782                         for (j = tbl8_index; j < tbl8_group_end; j++) {
783                                 if (!lpm->tbl8[j].valid ||
784                                                 lpm->tbl8[j].depth <= depth) {
785                                         struct rte_lpm_tbl_entry_v20
786                                                 new_tbl8_entry = {
787                                                 .valid = VALID,
788                                                 .valid_group = VALID,
789                                                 .depth = depth,
790                                         };
791                                         new_tbl8_entry.next_hop = next_hop;
792
793                                         /*
794                                          * Setting tbl8 entry in one go to avoid
795                                          * race conditions
796                                          */
797                                         lpm->tbl8[j] = new_tbl8_entry;
798
799                                         continue;
800                                 }
801                         }
802                 }
803         }
804
805         return 0;
806 }
807
808 static inline int32_t
809 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
810                 uint32_t next_hop)
811 {
812 #define group_idx next_hop
813         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
814
815         /* Calculate the index into Table24. */
816         tbl24_index = ip >> 8;
817         tbl24_range = depth_to_range(depth);
818
819         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
820                 /*
821                  * Set the entry if the tbl24 entry is invalid, or if it is
822                  * valid but not extended and its depth is <= the new depth.
823                  */
824                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
825                                 lpm->tbl24[i].depth <= depth)) {
826
827                         struct rte_lpm_tbl_entry new_tbl24_entry = {
828                                 .next_hop = next_hop,
829                                 .valid = VALID,
830                                 .valid_group = 0,
831                                 .depth = depth,
832                         };
833
834                         /* Setting tbl24 entry in one go to avoid race
835                          * conditions
836                          */
837                         lpm->tbl24[i] = new_tbl24_entry;
838
839                         continue;
840                 }
841
842                 if (lpm->tbl24[i].valid_group == 1) {
843                         /* If tbl24 entry is valid and extended calculate the
844                          *  index into tbl8.
845                          */
846                         tbl8_index = lpm->tbl24[i].group_idx *
847                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
848                         tbl8_group_end = tbl8_index +
849                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
850
851                         for (j = tbl8_index; j < tbl8_group_end; j++) {
852                                 if (!lpm->tbl8[j].valid ||
853                                                 lpm->tbl8[j].depth <= depth) {
854                                         struct rte_lpm_tbl_entry
855                                                 new_tbl8_entry = {
856                                                 .valid = VALID,
857                                                 .valid_group = VALID,
858                                                 .depth = depth,
859                                                 .next_hop = next_hop,
860                                         };
861
862                                         /*
863                                          * Setting tbl8 entry in one go to avoid
864                                          * race conditions
865                                          */
866                                         lpm->tbl8[j] = new_tbl8_entry;
867
868                                         continue;
869                                 }
870                         }
871                 }
872         }
873 #undef group_idx
874         return 0;
875 }
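
/*
 * Worked example (illustrative only): for a 10.1.0.0/16 route the loop above
 * starts at tbl24_index == (10 << 16 | 1 << 8) and writes
 * depth_to_range(16) == 256 consecutive tbl24 entries, i.e. the entries for
 * 10.1.0 through 10.1.255; entries already holding a deeper prefix are left
 * untouched (extended entries are handled through their tbl8 group).
 */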
876
877 static inline int32_t
878 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
879                 uint8_t next_hop)
880 {
881         uint32_t tbl24_index;
882         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
883                 tbl8_range, i;
884
885         tbl24_index = (ip_masked >> 8);
886         tbl8_range = depth_to_range(depth);
887
888         if (!lpm->tbl24[tbl24_index].valid) {
889                 /* Search for a free tbl8 group. */
890                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
891
892                 /* Check tbl8 allocation was successful. */
893                 if (tbl8_group_index < 0) {
894                         return tbl8_group_index;
895                 }
896
897                 /* Find index into tbl8 and range. */
898                 tbl8_index = (tbl8_group_index *
899                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
900                                 (ip_masked & 0xFF);
901
902                 /* Set tbl8 entry. */
903                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
904                         lpm->tbl8[i].depth = depth;
905                         lpm->tbl8[i].next_hop = next_hop;
906                         lpm->tbl8[i].valid = VALID;
907                 }
908
909                 /*
910                  * Update tbl24 entry to point to new tbl8 entry. Note: The
911                  * ext_flag and tbl8_index need to be updated simultaneously,
912                  * so assign whole structure in one go
913                  */
914
915                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
916                         { .group_idx = (uint8_t)tbl8_group_index, },
917                         .valid = VALID,
918                         .valid_group = 1,
919                         .depth = 0,
920                 };
921
922                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
923
924         } /* If valid entry but not extended calculate the index into Table8. */
925         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
926                 /* Search for free tbl8 group. */
927                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
928
929                 if (tbl8_group_index < 0) {
930                         return tbl8_group_index;
931                 }
932
933                 tbl8_group_start = tbl8_group_index *
934                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
935                 tbl8_group_end = tbl8_group_start +
936                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
937
938                 /* Populate new tbl8 with tbl24 value. */
939                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
940                         lpm->tbl8[i].valid = VALID;
941                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
942                         lpm->tbl8[i].next_hop =
943                                         lpm->tbl24[tbl24_index].next_hop;
944                 }
945
946                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
947
948                 /* Insert new rule into the tbl8 entry. */
949                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
950                         lpm->tbl8[i].valid = VALID;
951                         lpm->tbl8[i].depth = depth;
952                         lpm->tbl8[i].next_hop = next_hop;
953                 }
954
955                 /*
956                  * Update tbl24 entry to point to new tbl8 entry. Note: The
957                  * ext_flag and tbl8_index need to be updated simultaneously,
958                  * so assign whole structure in one go.
959                  */
960
961                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
962                                 { .group_idx = (uint8_t)tbl8_group_index, },
963                                 .valid = VALID,
964                                 .valid_group = 1,
965                                 .depth = 0,
966                 };
967
968                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
969
970         } else { /*
971                 * If the entry is valid and extended, calculate the index into tbl8.
972                 */
973                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
974                 tbl8_group_start = tbl8_group_index *
975                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
976                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
977
978                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
979
980                         if (!lpm->tbl8[i].valid ||
981                                         lpm->tbl8[i].depth <= depth) {
982                                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
983                                         .valid = VALID,
984                                         .depth = depth,
985                                         .valid_group = lpm->tbl8[i].valid_group,
986                                 };
987                                 new_tbl8_entry.next_hop = next_hop;
988                                 /*
989                                  * Setting tbl8 entry in one go to avoid race
990                                  * condition
991                                  */
992                                 lpm->tbl8[i] = new_tbl8_entry;
993
994                                 continue;
995                         }
996                 }
997         }
998
999         return 0;
1000 }
1001
1002 static inline int32_t
1003 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
1004                 uint32_t next_hop)
1005 {
1006 #define group_idx next_hop
1007         uint32_t tbl24_index;
1008         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
1009                 tbl8_range, i;
1010
1011         tbl24_index = (ip_masked >> 8);
1012         tbl8_range = depth_to_range(depth);
1013
1014         if (!lpm->tbl24[tbl24_index].valid) {
1015                 /* Search for a free tbl8 group. */
1016                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1017
1018                 /* Check tbl8 allocation was successful. */
1019                 if (tbl8_group_index < 0) {
1020                         return tbl8_group_index;
1021                 }
1022
1023                 /* Find index into tbl8 and range. */
1024                 tbl8_index = (tbl8_group_index *
1025                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1026                                 (ip_masked & 0xFF);
1027
1028                 /* Set tbl8 entry. */
1029                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1030                         lpm->tbl8[i].depth = depth;
1031                         lpm->tbl8[i].next_hop = next_hop;
1032                         lpm->tbl8[i].valid = VALID;
1033                 }
1034
1035                 /*
1036                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1037                  * ext_flag and tbl8_index need to be updated simultaneously,
1038                  * so assign whole structure in one go
1039                  */
1040
1041                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1042                         .group_idx = tbl8_group_index,
1043                         .valid = VALID,
1044                         .valid_group = 1,
1045                         .depth = 0,
1046                 };
1047
1048                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1049
1050         } /* If valid entry but not extended calculate the index into Table8. */
1051         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1052                 /* Search for free tbl8 group. */
1053                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1054
1055                 if (tbl8_group_index < 0) {
1056                         return tbl8_group_index;
1057                 }
1058
1059                 tbl8_group_start = tbl8_group_index *
1060                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1061                 tbl8_group_end = tbl8_group_start +
1062                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1063
1064                 /* Populate new tbl8 with tbl24 value. */
1065                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1066                         lpm->tbl8[i].valid = VALID;
1067                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
1068                         lpm->tbl8[i].next_hop =
1069                                         lpm->tbl24[tbl24_index].next_hop;
1070                 }
1071
1072                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1073
1074                 /* Insert new rule into the tbl8 entry. */
1075                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1076                         lpm->tbl8[i].valid = VALID;
1077                         lpm->tbl8[i].depth = depth;
1078                         lpm->tbl8[i].next_hop = next_hop;
1079                 }
1080
1081                 /*
1082                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1083                  * ext_flag and tbl8_index need to be updated simultaneously,
1084                  * so assign whole structure in one go.
1085                  */
1086
1087                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1088                                 .group_idx = tbl8_group_index,
1089                                 .valid = VALID,
1090                                 .valid_group = 1,
1091                                 .depth = 0,
1092                 };
1093
1094                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1095
1096         } else { /*
1097                 * If the entry is valid and extended, calculate the index into tbl8.
1098                 */
1099                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1100                 tbl8_group_start = tbl8_group_index *
1101                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1102                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1103
1104                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1105
1106                         if (!lpm->tbl8[i].valid ||
1107                                         lpm->tbl8[i].depth <= depth) {
1108                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1109                                         .valid = VALID,
1110                                         .depth = depth,
1111                                         .next_hop = next_hop,
1112                                         .valid_group = lpm->tbl8[i].valid_group,
1113                                 };
1114
1115                                 /*
1116                                  * Setting tbl8 entry in one go to avoid race
1117                                  * condition
1118                                  */
1119                                 lpm->tbl8[i] = new_tbl8_entry;
1120
1121                                 continue;
1122                         }
1123                 }
1124         }
1125 #undef group_idx
1126         return 0;
1127 }
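
/*
 * Worked example (illustrative only): for a 10.1.1.128/25 route the last
 * byte of the masked address selects the entry inside the allocated tbl8
 * group, so tbl8_index == tbl8_group_start + 128 and tbl8_range ==
 * depth_to_range(25) == 128, i.e. entries 128 through 255 of that group are
 * written.
 */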
1128
1129 /*
1130  * Add a route
1131  */
1132 int
1133 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1134                 uint8_t next_hop)
1135 {
1136         int32_t rule_index, status = 0;
1137         uint32_t ip_masked;
1138
1139         /* Check user arguments. */
1140         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1141                 return -EINVAL;
1142
1143         ip_masked = ip & depth_to_mask(depth);
1144
1145         /* Add the rule to the rule table. */
1146         rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
1147
1148         /* If there is no space available for the new rule, return an error. */
1149         if (rule_index < 0) {
1150                 return rule_index;
1151         }
1152
1153         if (depth <= MAX_DEPTH_TBL24) {
1154                 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
1155         } else { /* If depth > MAX_DEPTH_TBL24 */
1156                 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
1157
1158                 /*
1159                  * If the add fails because the tbl8 extensions are exhausted,
1160                  * delete the rule that was just added to the rule table.
1161                  */
1162                 if (status < 0) {
1163                         rule_delete_v20(lpm, rule_index, depth);
1164
1165                         return status;
1166                 }
1167         }
1168
1169         return 0;
1170 }
1171 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1172
1173 int
1174 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1175                 uint32_t next_hop)
1176 {
1177         int32_t rule_index, status = 0;
1178         uint32_t ip_masked;
1179
1180         /* Check user arguments. */
1181         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1182                 return -EINVAL;
1183
1184         ip_masked = ip & depth_to_mask(depth);
1185
1186         /* Add the rule to the rule table. */
1187         rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
1188
1189         /* If there is no space available for the new rule, return an error. */
1190         if (rule_index < 0) {
1191                 return rule_index;
1192         }
1193
1194         if (depth <= MAX_DEPTH_TBL24) {
1195                 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
1196         } else { /* If depth > MAX_DEPTH_TBL24 */
1197                 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
1198
1199                 /*
1200                  * If the add fails because the tbl8 extensions are exhausted,
1201                  * delete the rule that was just added to the rule table.
1202                  */
1203                 if (status < 0) {
1204                         rule_delete_v1604(lpm, rule_index, depth);
1205
1206                         return status;
1207                 }
1208         }
1209
1210         return 0;
1211 }
1212 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1213 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1214                 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
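
/*
 * Usage sketch (illustrative only, assuming a table "lpm" created as above
 * and a hypothetical next hop id of 5): add a /24 route, then resolve an
 * address inside it with rte_lpm_lookup() from rte_lpm.h.
 *
 *     uint32_t ip = ((uint32_t)192 << 24) | (168 << 16) | (1 << 8);
 *     uint32_t next_hop;
 *
 *     if (rte_lpm_add(lpm, ip, 24, 5) < 0)
 *             printf("failed to add 192.168.1.0/24\n");
 *
 *     if (rte_lpm_lookup(lpm, ip | 7, &next_hop) == 0)
 *             printf("192.168.1.7 -> next hop %u\n", next_hop);
 */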
1215
1216 /*
1217  * Look for a rule in the high-level rules table
1218  */
1219 int
1220 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1221 uint8_t *next_hop)
1222 {
1223         uint32_t ip_masked;
1224         int32_t rule_index;
1225
1226         /* Check user arguments. */
1227         if ((lpm == NULL) ||
1228                 (next_hop == NULL) ||
1229                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1230                 return -EINVAL;
1231
1232         /* Look for the rule using rule_find. */
1233         ip_masked = ip & depth_to_mask(depth);
1234         rule_index = rule_find_v20(lpm, ip_masked, depth);
1235
1236         if (rule_index >= 0) {
1237                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1238                 return 1;
1239         }
1240
1241         /* If rule is not found return 0. */
1242         return 0;
1243 }
1244 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1245
1246 int
1247 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1248 uint32_t *next_hop)
1249 {
1250         uint32_t ip_masked;
1251         int32_t rule_index;
1252
1253         /* Check user arguments. */
1254         if ((lpm == NULL) ||
1255                 (next_hop == NULL) ||
1256                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1257                 return -EINVAL;
1258
1259         /* Look for the rule using rule_find. */
1260         ip_masked = ip & depth_to_mask(depth);
1261         rule_index = rule_find_v1604(lpm, ip_masked, depth);
1262
1263         if (rule_index >= 0) {
1264                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1265                 return 1;
1266         }
1267
1268         /* If rule is not found return 0. */
1269         return 0;
1270 }
1271 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1272 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1273                 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
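
/*
 * Usage sketch (illustrative only, reusing "lpm" and "ip" from the sketch
 * above): unlike rte_lpm_lookup(), this checks the rule table for an exact
 * prefix (ip/depth) rather than performing a longest-prefix match; the ip
 * is masked to the given depth internally.
 *
 *     uint32_t next_hop;
 *     int ret = rte_lpm_is_rule_present(lpm, ip, 24, &next_hop);
 *
 *     if (ret == 1)
 *             printf("192.168.1.0/24 present, next hop %u\n", next_hop);
 *     else if (ret == 0)
 *             printf("192.168.1.0/24 not present\n");
 */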
1274
1275 static inline int32_t
1276 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1277                 uint8_t *sub_rule_depth)
1278 {
1279         int32_t rule_index;
1280         uint32_t ip_masked;
1281         uint8_t prev_depth;
1282
1283         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1284                 ip_masked = ip & depth_to_mask(prev_depth);
1285
1286                 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1287
1288                 if (rule_index >= 0) {
1289                         *sub_rule_depth = prev_depth;
1290                         return rule_index;
1291                 }
1292         }
1293
1294         return -1;
1295 }
1296
1297 static inline int32_t
1298 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1299                 uint8_t *sub_rule_depth)
1300 {
1301         int32_t rule_index;
1302         uint32_t ip_masked;
1303         uint8_t prev_depth;
1304
1305         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1306                 ip_masked = ip & depth_to_mask(prev_depth);
1307
1308                 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1309
1310                 if (rule_index >= 0) {
1311                         *sub_rule_depth = prev_depth;
1312                         return rule_index;
1313                 }
1314         }
1315
1316         return -1;
1317 }
1318
1319 static inline int32_t
1320 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1321         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1322 {
1323         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1324
1325         /* Calculate the range and index into Table24. */
1326         tbl24_range = depth_to_range(depth);
1327         tbl24_index = (ip_masked >> 8);
1328
1329         /*
1330          * First check sub_rule_index. A value of -1 means there is no replacement
1331          * rule, while a non-negative value is the index of the replacement rule.
1332          */
1333         if (sub_rule_index < 0) {
1334                 /*
1335                  * If no replacement rule exists then invalidate entries
1336                  * associated with this rule.
1337                  */
1338                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1339
1340                         if (lpm->tbl24[i].valid_group == 0 &&
1341                                         lpm->tbl24[i].depth <= depth) {
1342                                 lpm->tbl24[i].valid = INVALID;
1343                         } else if (lpm->tbl24[i].valid_group == 1) {
1344                                 /*
1345                                  * If TBL24 entry is extended, then there has
1346                                  * to be a rule with depth >= 25 in the
1347                                  * associated TBL8 group.
1348                                  */
1349
1350                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1351                                 tbl8_index = tbl8_group_index *
1352                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1353
1354                                 for (j = tbl8_index; j < (tbl8_index +
1355                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1356
1357                                         if (lpm->tbl8[j].depth <= depth)
1358                                                 lpm->tbl8[j].valid = INVALID;
1359                                 }
1360                         }
1361                 }
1362         } else {
1363                 /*
1364                  * If a replacement rule exists then modify entries
1365                  * associated with this rule.
1366                  */
1367
1368                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1369                         {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
1370                         .valid = VALID,
1371                         .valid_group = 0,
1372                         .depth = sub_rule_depth,
1373                 };
1374
1375                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1376                         .valid = VALID,
1377                         .valid_group = VALID,
1378                         .depth = sub_rule_depth,
1379                 };
1380                 new_tbl8_entry.next_hop =
1381                                 lpm->rules_tbl[sub_rule_index].next_hop;
1382
1383                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1384
1385                         if (lpm->tbl24[i].valid_group == 0 &&
1386                                         lpm->tbl24[i].depth <= depth) {
1387                                 lpm->tbl24[i] = new_tbl24_entry;
1388                         } else  if (lpm->tbl24[i].valid_group == 1) {
1389                                 /*
1390                                  * If TBL24 entry is extended, then there has
1391                                  * to be a rule with depth >= 25 in the
1392                                  * associated TBL8 group.
1393                                  */
1394
1395                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1396                                 tbl8_index = tbl8_group_index *
1397                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1398
1399                                 for (j = tbl8_index; j < (tbl8_index +
1400                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1401
1402                                         if (lpm->tbl8[j].depth <= depth)
1403                                                 lpm->tbl8[j] = new_tbl8_entry;
1404                                 }
1405                         }
1406                 }
1407         }
1408
1409         return 0;
1410 }
1411
1412 static inline int32_t
1413 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1414         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1415 {
1416 #define group_idx next_hop
1417         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1418
1419         /* Calculate the range and index into Table24. */
1420         tbl24_range = depth_to_range(depth);
1421         tbl24_index = (ip_masked >> 8);
1422
1423         /*
1424          * First check sub_rule_index. A value of -1 means no replacement
1425          * rule exists; a non-negative value is the replacement rule's index.
1426          */
1427         if (sub_rule_index < 0) {
1428                 /*
1429                  * If no replacement rule exists then invalidate entries
1430                  * associated with this rule.
1431                  */
1432                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1433
1434                         if (lpm->tbl24[i].valid_group == 0 &&
1435                                         lpm->tbl24[i].depth <= depth) {
1436                                 lpm->tbl24[i].valid = INVALID;
1437                         } else if (lpm->tbl24[i].valid_group == 1) {
1438                                 /*
1439                                  * If TBL24 entry is extended, then there has
1440                                  * to be a rule with depth >= 25 in the
1441                                  * associated TBL8 group.
1442                                  */
1443
1444                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1445                                 tbl8_index = tbl8_group_index *
1446                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1447
1448                                 for (j = tbl8_index; j < (tbl8_index +
1449                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1450
1451                                         if (lpm->tbl8[j].depth <= depth)
1452                                                 lpm->tbl8[j].valid = INVALID;
1453                                 }
1454                         }
1455                 }
1456         } else {
1457                 /*
1458                  * If a replacement rule exists then modify entries
1459                  * associated with this rule.
1460                  */
1461
1462                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1463                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1464                         .valid = VALID,
1465                         .valid_group = 0,
1466                         .depth = sub_rule_depth,
1467                 };
1468
1469                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1470                         .valid = VALID,
1471                         .valid_group = VALID,
1472                         .depth = sub_rule_depth,
1473                         .next_hop =
1474                                 lpm->rules_tbl[sub_rule_index].next_hop,
1475                 };
1476
1477                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1478
1479                         if (lpm->tbl24[i].valid_group == 0 &&
1480                                         lpm->tbl24[i].depth <= depth) {
1481                                 lpm->tbl24[i] = new_tbl24_entry;
1482                         } else if (lpm->tbl24[i].valid_group == 1) {
1483                                 /*
1484                                  * If TBL24 entry is extended, then there has
1485                                  * to be a rule with depth >= 25 in the
1486                                  * associated TBL8 group.
1487                                  */
1488
1489                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1490                                 tbl8_index = tbl8_group_index *
1491                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1492
1493                                 for (j = tbl8_index; j < (tbl8_index +
1494                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1495
1496                                         if (lpm->tbl8[j].depth <= depth)
1497                                                 lpm->tbl8[j] = new_tbl8_entry;
1498                                 }
1499                         }
1500                 }
1501         }
1502 #undef group_idx
1503         return 0;
1504 }
1505
1506 /*
1507  * Checks if table 8 group can be recycled.
1508  *
1509  * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
1510  * Return of -EINVAL means tbl8 is empty and thus can be recycled.
1511  * Return of a value > -1 means tbl8 is in use but every entry holds the
1512  * same rule, so the group can be folded into one tbl24 entry and recycled.
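 *
 * For example, after a delete the group may be left holding only the
 * expansion of a single depth <= 24 rule in every entry (return > -1:
 * fold the group back into its tbl24 entry), or no valid entries at all
 * (return -EINVAL: free the group).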
1513  */
1514 static inline int32_t
1515 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1516                 uint32_t tbl8_group_start)
1517 {
1518         uint32_t tbl8_group_end, i;
1519         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1520
1521         /*
1522          * Check the first entry of the given tbl8. If it is invalid we know
1523          * this tbl8 group holds no rule of depth <= MAX_DEPTH_TBL24 (such a
1524          * rule would have been expanded into every entry of the group), so
1525          * the group cannot be folded into a single tbl24 entry.
1526          */
1527         if (tbl8[tbl8_group_start].valid) {
1528                 /*
1529                  * If the first entry is valid, check whether its depth is 24 or
1530                  * less and, if so, verify that the rest of the entries all have
1531                  * this same depth.
1532                  */
1533                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1534                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1535                                         i++) {
1536
1537                                 if (tbl8[i].depth !=
1538                                                 tbl8[tbl8_group_start].depth) {
1539
1540                                         return -EEXIST;
1541                                 }
1542                         }
1543                         /* If all entries are the same return the tbl8 index */
1544                         return tbl8_group_start;
1545                 }
1546
1547                 return -EEXIST;
1548         }
1549         /*
1550          * If the first entry is invalid check if the rest of the entries in
1551          * the tbl8 are invalid.
1552          */
1553         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1554                 if (tbl8[i].valid)
1555                         return -EEXIST;
1556         }
1557         /* If no valid entries are found then return -EINVAL. */
1558         return -EINVAL;
1559 }
1560
1561 static inline int32_t
1562 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1563                 uint32_t tbl8_group_start)
1564 {
1565         uint32_t tbl8_group_end, i;
1566         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1567
1568         /*
1569          * Check the first entry of the given tbl8. If it is invalid we know
1570          * this tbl8 group holds no rule of depth <= MAX_DEPTH_TBL24 (such a
1571          * rule would have been expanded into every entry of the group), so
1572          * the group cannot be folded into a single tbl24 entry.
1573          */
1574         if (tbl8[tbl8_group_start].valid) {
1575                 /*
1576                  * If the first entry is valid, check whether its depth is 24 or
1577                  * less and, if so, verify that the rest of the entries all have
1578                  * this same depth.
1579                  */
1580                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1581                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1582                                         i++) {
1583
1584                                 if (tbl8[i].depth !=
1585                                                 tbl8[tbl8_group_start].depth) {
1586
1587                                         return -EEXIST;
1588                                 }
1589                         }
1590                         /* If all entries are the same return the tbl8 index */
1591                         return tbl8_group_start;
1592                 }
1593
1594                 return -EEXIST;
1595         }
1596         /*
1597          * If the first entry is invalid check if the rest of the entries in
1598          * the tbl8 are invalid.
1599          */
1600         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1601                 if (tbl8[i].valid)
1602                         return -EEXIST;
1603         }
1604         /* If no valid entries are found then return -EINVAL. */
1605         return -EINVAL;
1606 }
1607
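/*
 * Remove a rule of depth > MAX_DEPTH_TBL24. Only the tbl8 group reached
 * through the rule's tbl24 entry is touched: entries covered by the
 * deleted prefix and not owned by a deeper rule are invalidated or
 * rewritten with the replacement sub-rule, and the group is then checked
 * for recycling into its tbl24 entry or for freeing.
 */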
1608 static inline int32_t
1609 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1610         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1611 {
1612         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1613                         tbl8_range, i;
1614         int32_t tbl8_recycle_index;
1615
1616         /*
1617          * Calculate the index into tbl24 and range. Note: All depths larger
1618          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1619          */
1620         tbl24_index = ip_masked >> 8;
1621
1622         /* Calculate the index into tbl8 and range. */
1623         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1624         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1625         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1626         tbl8_range = depth_to_range(depth);
1627
1628         if (sub_rule_index < 0) {
1629                 /*
1630                  * Loop through the range of entries on tbl8 for which the
1631                  * rule_to_delete must be removed or modified.
1632                  */
1633                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1634                         if (lpm->tbl8[i].depth <= depth)
1635                                 lpm->tbl8[i].valid = INVALID;
1636                 }
1637         } else {
1638                 /* Set new tbl8 entry. */
1639                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1640                         .valid = VALID,
1641                         .depth = sub_rule_depth,
1642                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1643                 };
1644
1645                 new_tbl8_entry.next_hop =
1646                                 lpm->rules_tbl[sub_rule_index].next_hop;
1647                 /*
1648                  * Loop through the range of entries on tbl8 for which the
1649                  * rule_to_delete must be modified.
1650                  */
1651                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1652                         if (lpm->tbl8[i].depth <= depth)
1653                                 lpm->tbl8[i] = new_tbl8_entry;
1654                 }
1655         }
1656
1657         /*
1658          * Check if there are any valid entries in this tbl8 group. If all
1659          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1660          * associated tbl24 entry.
1661          */
1662
1663         tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1664
1665         if (tbl8_recycle_index == -EINVAL) {
1666                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1667                 lpm->tbl24[tbl24_index].valid = 0;
1668                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1669         } else if (tbl8_recycle_index > -1) {
1670                 /* Update tbl24 entry. */
1671                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1672                         { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
1673                         .valid = VALID,
1674                         .valid_group = 0,
1675                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1676                 };
1677
1678                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1679                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1680                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1681         }
1682
1683         return 0;
1684 }
1685
1686 static inline int32_t
1687 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1688         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1689 {
1690 #define group_idx next_hop
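/* As in delete_depth_small_v1604, group_idx aliases the 24-bit next_hop field. */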
1691         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1692                         tbl8_range, i;
1693         int32_t tbl8_recycle_index;
1694
1695         /*
1696          * Calculate the index into tbl24 and range. Note: All depths larger
1697          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1698          */
1699         tbl24_index = ip_masked >> 8;
1700
1701         /* Calculate the index into tbl8 and range. */
1702         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1703         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1704         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1705         tbl8_range = depth_to_range(depth);
1706
1707         if (sub_rule_index < 0) {
1708                 /*
1709                  * Loop through the range of entries on tbl8 for which the
1710                  * rule_to_delete must be removed or modified.
1711                  */
1712                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1713                         if (lpm->tbl8[i].depth <= depth)
1714                                 lpm->tbl8[i].valid = INVALID;
1715                 }
1716         } else {
1717                 /* Set new tbl8 entry. */
1718                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1719                         .valid = VALID,
1720                         .depth = sub_rule_depth,
1721                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1722                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1723                 };
1724
1725                 /*
1726                  * Loop through the range of entries on tbl8 for which the
1727                  * rule_to_delete must be modified.
1728                  */
1729                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1730                         if (lpm->tbl8[i].depth <= depth)
1731                                 lpm->tbl8[i] = new_tbl8_entry;
1732                 }
1733         }
1734
1735         /*
1736          * Check if there are any valid entries in this tbl8 group. If all
1737          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1738          * associated tbl24 entry.
1739          */
1740
1741         tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1742
1743         if (tbl8_recycle_index == -EINVAL) {
1744                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1745                 lpm->tbl24[tbl24_index].valid = 0;
1746                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1747         } else if (tbl8_recycle_index > -1) {
1748                 /* Update tbl24 entry. */
1749                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1750                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1751                         .valid = VALID,
1752                         .valid_group = 0,
1753                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1754                 };
1755
1756                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1757                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1758                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1759         }
1760 #undef group_idx
1761         return 0;
1762 }
1763
1764 /*
1765  * Deletes a rule
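 *
 * Deletion proceeds in three steps: locate the rule in the rules table
 * and remove it, find the longest less-specific rule that still covers
 * the prefix (find_previous_rule), and rewrite the affected tbl24/tbl8
 * entries with delete_depth_small or delete_depth_big depending on the
 * deleted depth.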
1766  */
1767 int
1768 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1769 {
1770         int32_t rule_to_delete_index, sub_rule_index;
1771         uint32_t ip_masked;
1772         uint8_t sub_rule_depth;
1773         /*
1774          * Check input arguments. Note: ip is an unsigned 32-bit value, so
1775          * every value is within range and it need not be checked.
1776          */
1777         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1778                 return -EINVAL;
1779         }
1780
1781         ip_masked = ip & depth_to_mask(depth);
1782
1783         /*
1784          * Find the index of the input rule, that needs to be deleted, in the
1785          * rule table.
1786          */
1787         rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1788
1789         /*
1790          * Check if rule_to_delete_index was found. If no rule was found the
1791          * function rule_find returns -EINVAL.
1792          */
1793         if (rule_to_delete_index < 0)
1794                 return -EINVAL;
1795
1796         /* Delete the rule from the rule table. */
1797         rule_delete_v20(lpm, rule_to_delete_index, depth);
1798
1799         /*
1800          * Find a rule to replace the rule_to_delete. If no such rule exists,
1801          * find_previous_rule returns -1 and the table entries associated
1802          * with the deleted rule are simply invalidated.
1803          */
1804         sub_rule_depth = 0;
1805         sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1806
1807         /*
1808          * If the input depth value is 24 or less, use delete_depth_small;
1809          * otherwise use delete_depth_big.
1810          */
1811         if (depth <= MAX_DEPTH_TBL24) {
1812                 return delete_depth_small_v20(lpm, ip_masked, depth,
1813                                 sub_rule_index, sub_rule_depth);
1814         } else { /* If depth > MAX_DEPTH_TBL24 */
1815                 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1816                                 sub_rule_depth);
1817         }
1818 }
1819 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1820
1821 int
1822 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1823 {
1824         int32_t rule_to_delete_index, sub_rule_index;
1825         uint32_t ip_masked;
1826         uint8_t sub_rule_depth;
1827         /*
1828          * Check input arguments. Note: ip is an unsigned 32-bit value, so
1829          * every value is within range and it need not be checked.
1830          */
1831         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1832                 return -EINVAL;
1833         }
1834
1835         ip_masked = ip & depth_to_mask(depth);
1836
1837         /*
1838          * Find the index of the input rule, that needs to be deleted, in the
1839          * rule table.
1840          */
1841         rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1842
1843         /*
1844          * Check if rule_to_delete_index was found. If no rule was found the
1845          * function rule_find returns -EINVAL.
1846          */
1847         if (rule_to_delete_index < 0)
1848                 return -EINVAL;
1849
1850         /* Delete the rule from the rule table. */
1851         rule_delete_v1604(lpm, rule_to_delete_index, depth);
1852
1853         /*
1854          * Find a rule to replace the rule_to_delete. If no such rule exists,
1855          * find_previous_rule returns -1 and the table entries associated
1856          * with the deleted rule are simply invalidated.
1857          */
1858         sub_rule_depth = 0;
1859         sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1860
1861         /*
1862          * If the input depth value is 24 or less, use delete_depth_small;
1863          * otherwise use delete_depth_big.
1864          */
1865         if (depth <= MAX_DEPTH_TBL24) {
1866                 return delete_depth_small_v1604(lpm, ip_masked, depth,
1867                                 sub_rule_index, sub_rule_depth);
1868         } else { /* If depth > MAX_DEPTH_TBL24 */
1869                 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1870                                 sub_rule_depth);
1871         }
1872 }
1873 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1874 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1875                 uint8_t depth), rte_lpm_delete_v1604);
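/*
 * Illustrative usage sketch (not part of this file): assumes an LPM
 * table created with rte_lpm_create() and the IPv4() address macro from
 * rte_ip.h. Deleting a /16 makes lookups in that range fall back to the
 * shorter covering prefix, if any:
 *
 *	rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 8, next_hop_a);
 *	rte_lpm_add(lpm, IPv4(10, 1, 0, 0), 16, next_hop_b);
 *	rte_lpm_delete(lpm, IPv4(10, 1, 0, 0), 16);
 *	(lookups for 10.1.x.x now resolve to next_hop_a again)
 */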
1876
1877 /*
1878  * Delete all rules from the LPM table.
1879  */
1880 void
1881 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1882 {
1883         /* Zero rule information. */
1884         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1885
1886         /* Zero tbl24. */
1887         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1888
1889         /* Zero tbl8. */
1890         memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1891
1892         /* Delete all rules from the rules table. */
1893         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1894 }
1895 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1896
1897 void
1898 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1899 {
1900         /* Zero rule information. */
1901         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1902
1903         /* Zero tbl24. */
1904         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1905
1906         /* Zero tbl8. */
1907         memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1908                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1909
1910         /* Delete all rules from the rules table. */
1911         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1912 }
1913 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1914 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1915                 rte_lpm_delete_all_v1604);