lpm: add a new config structure for IPv4
[dpdk.git] / lib / librte_lpm / rte_lpm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <string.h>
35 #include <stdint.h>
36 #include <errno.h>
37 #include <stdarg.h>
38 #include <stdio.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41
42 #include <rte_log.h>
43 #include <rte_branch_prediction.h>
44 #include <rte_common.h>
45 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
46 #include <rte_malloc.h>
47 #include <rte_memzone.h>
48 #include <rte_eal.h>
49 #include <rte_eal_memconfig.h>
50 #include <rte_per_lcore.h>
51 #include <rte_string_fns.h>
52 #include <rte_errno.h>
53 #include <rte_rwlock.h>
54 #include <rte_spinlock.h>
55
56 #include "rte_lpm.h"
57
58 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
59
60 static struct rte_tailq_elem rte_lpm_tailq = {
61         .name = "RTE_LPM",
62 };
63 EAL_REGISTER_TAILQ(rte_lpm_tailq)
64
65 #define MAX_DEPTH_TBL24 24
66
67 enum valid_flag {
68         INVALID = 0,
69         VALID
70 };
71
72 /* Macro to enable/disable run-time checks. */
73 #if defined(RTE_LIBRTE_LPM_DEBUG)
74 #include <rte_debug.h>
75 #define VERIFY_DEPTH(depth) do {                                \
76         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
77                 rte_panic("LPM: Invalid depth (%u) at line %d", \
78                                 (unsigned)(depth), __LINE__);   \
79 } while (0)
80 #else
81 #define VERIFY_DEPTH(depth)
82 #endif
83
84 /*
85  * Converts a given depth value to its corresponding mask value.
86  *
87  * depth  (IN)          : range = 1 - 32
88  * mask   (OUT)         : 32bit mask
89  */
90 static uint32_t __attribute__((pure))
91 depth_to_mask(uint8_t depth)
92 {
93         VERIFY_DEPTH(depth);
94
95         /* To calculate the mask, start with a 1 in the most significant
96          * bit and arithmetic-right-shift it; sign extension fills in 1's.
97          */
98         return (int)0x80000000 >> (depth - 1);
99 }
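/*
 * Worked examples (illustrative, not part of the original source): the
 * arithmetic right shift of the signed constant sign-extends from the most
 * significant bit, so for example
 *
 *   depth_to_mask(1)  == 0x80000000
 *   depth_to_mask(8)  == 0xFF000000
 *   depth_to_mask(24) == 0xFFFFFF00
 *   depth_to_mask(32) == 0xFFFFFFFF
 */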
100
101 /*
102  * Converts a given depth value to its corresponding range value.
103  */
104 static inline uint32_t __attribute__((pure))
105 depth_to_range(uint8_t depth)
106 {
107         VERIFY_DEPTH(depth);
108
109         /*
110          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
111          */
112         if (depth <= MAX_DEPTH_TBL24)
113                 return 1 << (MAX_DEPTH_TBL24 - depth);
114
115         /* Else if depth is greater than 24 */
116         return 1 << (RTE_LPM_MAX_DEPTH - depth);
117 }
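/*
 * Worked examples (illustrative): a /16 spans 1 << (24 - 16) = 256
 * consecutive tbl24 entries, a /24 spans exactly one tbl24 entry, and a
 * /28 spans 1 << (32 - 28) = 16 consecutive entries within a tbl8 group.
 */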
118
119 /*
120  * Find an existing lpm table and return a pointer to it.
121  */
122 struct rte_lpm_v20 *
123 rte_lpm_find_existing_v20(const char *name)
124 {
125         struct rte_lpm_v20 *l = NULL;
126         struct rte_tailq_entry *te;
127         struct rte_lpm_list *lpm_list;
128
129         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
130
131         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
132         TAILQ_FOREACH(te, lpm_list, next) {
133                 l = (struct rte_lpm_v20 *) te->data;
134                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
135                         break;
136         }
137         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
138
139         if (te == NULL) {
140                 rte_errno = ENOENT;
141                 return NULL;
142         }
143
144         return l;
145 }
146 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
147
148 struct rte_lpm *
149 rte_lpm_find_existing_v1604(const char *name)
150 {
151         struct rte_lpm *l = NULL;
152         struct rte_tailq_entry *te;
153         struct rte_lpm_list *lpm_list;
154
155         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
156
157         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
158         TAILQ_FOREACH(te, lpm_list, next) {
159                 l = (struct rte_lpm *) te->data;
160                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
161                         break;
162         }
163         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
164
165         if (te == NULL) {
166                 rte_errno = ENOENT;
167                 return NULL;
168         }
169
170         return l;
171 }
172 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
173 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
174                 rte_lpm_find_existing_v1604);
175
176 /*
177  * Allocates memory for LPM object
178  */
179 struct rte_lpm_v20 *
180 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
181                 __rte_unused int flags)
182 {
183         char mem_name[RTE_LPM_NAMESIZE];
184         struct rte_lpm_v20 *lpm = NULL;
185         struct rte_tailq_entry *te;
186         uint32_t mem_size;
187         struct rte_lpm_list *lpm_list;
188
189         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
190
191         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
192
193         /* Check user arguments. */
194         if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
195                 rte_errno = EINVAL;
196                 return NULL;
197         }
198
199         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
200
201         /* Determine the amount of memory to allocate. */
202         mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
203
204         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
205
206         /* guarantee there is no existing entry with the same name */
207         TAILQ_FOREACH(te, lpm_list, next) {
208                 lpm = (struct rte_lpm_v20 *) te->data;
209                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
210                         break;
211         }
212         if (te != NULL)
213                 goto exit;
214
215         /* allocate tailq entry */
216         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
217         if (te == NULL) {
218                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
219                 goto exit;
220         }
221
222         /* Allocate memory to store the LPM data structures. */
223         lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
224                         RTE_CACHE_LINE_SIZE, socket_id);
225         if (lpm == NULL) {
226                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
227                 rte_free(te);
228                 goto exit;
229         }
230
231         /* Save user arguments. */
232         lpm->max_rules = max_rules;
233         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
234
235         te->data = (void *) lpm;
236
237         TAILQ_INSERT_TAIL(lpm_list, te, next);
238
239 exit:
240         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
241
242         return lpm;
243 }
244 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
245
246 struct rte_lpm *
247 rte_lpm_create_v1604(const char *name, int socket_id,
248                 const struct rte_lpm_config *config)
249 {
250         char mem_name[RTE_LPM_NAMESIZE];
251         struct rte_lpm *lpm = NULL;
252         struct rte_tailq_entry *te;
253         uint32_t mem_size, rules_size, tbl8s_size;
254         struct rte_lpm_list *lpm_list;
255
256         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
257
258         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
259
260         /* Check user arguments. */
261         if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
262                         || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
263                 rte_errno = EINVAL;
264                 return NULL;
265         }
266
267         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
268
269         /* Determine the amount of memory to allocate. */
270         mem_size = sizeof(*lpm);
271         rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
272         tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
273                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
274
275         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
276
277         /* guarantee there is no existing entry with the same name */
278         TAILQ_FOREACH(te, lpm_list, next) {
279                 lpm = (struct rte_lpm *) te->data;
280                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
281                         break;
282         }
283         if (te != NULL)
284                 goto exit;
285
286         /* allocate tailq entry */
287         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
288         if (te == NULL) {
289                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
290                 goto exit;
291         }
292
293         /* Allocate memory to store the LPM data structures. */
294         lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
295                         RTE_CACHE_LINE_SIZE, socket_id);
296         if (lpm == NULL) {
297                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
298                 rte_free(te);
299                 goto exit;
300         }
301
302         lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
303                         (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
304
305         if (lpm->rules_tbl == NULL) {
306                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
307                 rte_free(lpm);
308                 rte_free(te);
309                 goto exit;
310         }
311
312         lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
313                         (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
314
315         if (lpm->tbl8 == NULL) {
316                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
317                 rte_free(lpm);
318                 rte_free(te);
319                 goto exit;
320         }
321
322         /* Save user arguments. */
323         lpm->max_rules = config->max_rules;
324         lpm->number_tbl8s = config->number_tbl8s;
325         snprintf(lpm->name, sizeof(lpm->name), "%s", name);
326
327         te->data = (void *) lpm;
328
329         TAILQ_INSERT_TAIL(lpm_list, te, next);
330
331 exit:
332         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
333
334         return lpm;
335 }
336 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
337 MAP_STATIC_SYMBOL(
338         struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
339                         const struct rte_lpm_config *config), rte_lpm_create_v1604);
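/*
 * Minimal usage sketch for the new config structure (illustrative only, not
 * part of the original file; the table name and sizes below are arbitrary
 * example values, not recommendations):
 *
 *   struct rte_lpm_config config = {
 *           .max_rules = 1024,
 *           .number_tbl8s = 256,
 *   };
 *   struct rte_lpm *lpm;
 *
 *   lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
 *   if (lpm == NULL)
 *           rte_exit(EXIT_FAILURE, "LPM table creation failed\n");
 */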
340
341 /*
342  * Deallocates memory for given LPM table.
343  */
344 void
345 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
346 {
347         struct rte_lpm_list *lpm_list;
348         struct rte_tailq_entry *te;
349
350         /* Check user arguments. */
351         if (lpm == NULL)
352                 return;
353
354         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
355
356         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
357
358         /* find our tailq entry */
359         TAILQ_FOREACH(te, lpm_list, next) {
360                 if (te->data == (void *) lpm)
361                         break;
362         }
363         if (te == NULL) {
364                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
365                 return;
366         }
367
368         TAILQ_REMOVE(lpm_list, te, next);
369
370         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
371
372         rte_free(lpm);
373         rte_free(te);
374 }
375 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
376
377 void
378 rte_lpm_free_v1604(struct rte_lpm *lpm)
379 {
380         struct rte_lpm_list *lpm_list;
381         struct rte_tailq_entry *te;
382
383         /* Check user arguments. */
384         if (lpm == NULL)
385                 return;
386
387         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
388
389         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
390
391         /* find our tailq entry */
392         TAILQ_FOREACH(te, lpm_list, next) {
393                 if (te->data == (void *) lpm)
394                         break;
395         }
396         if (te == NULL) {
397                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
398                 return;
399         }
400
401         TAILQ_REMOVE(lpm_list, te, next);
402
403         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
404
405         rte_free(lpm);
406         rte_free(te);
407 }
408 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
409 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
410                 rte_lpm_free_v1604);
411
412 /*
413  * Adds a rule to the rule table.
414  *
415  * NOTE: The rule table is split into 32 groups. Each group contains rules that
416  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
417  * prefixes with a depth of 1, etc.). In the following code (depth - 1) is
418  * used as the group index because, even though the depth range is 1 - 32,
419  * rule groups are indexed from 0 - 31.
420  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
421  */
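/*
 * Illustrative layout (hypothetical values, not from the original source):
 * with two /8 rules and one /16 rule stored, the bookkeeping would be
 *
 *   rule_info[7]  (depth 8) :  first_rule = 0, used_rules = 2
 *   rule_info[15] (depth 16):  first_rule = 2, used_rules = 1
 *
 * i.e. rules_tbl[0..1] hold the /8 rules, rules_tbl[2] holds the /16 rule,
 * and groups for unused depths keep used_rules == 0.
 */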
422 static inline int32_t
423 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
424         uint8_t next_hop)
425 {
426         uint32_t rule_gindex, rule_index, last_rule;
427         int i;
428
429         VERIFY_DEPTH(depth);
430
431         /* Scan through rule group to see if rule already exists. */
432         if (lpm->rule_info[depth - 1].used_rules > 0) {
433
434                 /* rule_gindex stands for rule group index. */
435                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
436                 /* Initialise rule_index to point to start of rule group. */
437                 rule_index = rule_gindex;
438                 /* Last rule = Last used rule in this rule group. */
439                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
440
441                 for (; rule_index < last_rule; rule_index++) {
442
443                         /* If rule already exists update its next_hop and return. */
444                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
445                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
446
447                                 return rule_index;
448                         }
449                 }
450
451                 if (rule_index == lpm->max_rules)
452                         return -ENOSPC;
453         } else {
454                 /* Calculate the position in which the rule will be stored. */
455                 rule_index = 0;
456
457                 for (i = depth - 1; i > 0; i--) {
458                         if (lpm->rule_info[i - 1].used_rules > 0) {
459                                 rule_index = lpm->rule_info[i - 1].first_rule
460                                                 + lpm->rule_info[i - 1].used_rules;
461                                 break;
462                         }
463                 }
464                 if (rule_index == lpm->max_rules)
465                         return -ENOSPC;
466
467                 lpm->rule_info[depth - 1].first_rule = rule_index;
468         }
469
470         /* Make room for the new rule in the array. */
471         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
472                 if (lpm->rule_info[i - 1].first_rule
473                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
474                         return -ENOSPC;
475
476                 if (lpm->rule_info[i - 1].used_rules > 0) {
477                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
478                                 + lpm->rule_info[i - 1].used_rules]
479                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
480                         lpm->rule_info[i - 1].first_rule++;
481                 }
482         }
483
484         /* Add the new rule. */
485         lpm->rules_tbl[rule_index].ip = ip_masked;
486         lpm->rules_tbl[rule_index].next_hop = next_hop;
487
488         /* Increment the used rules counter for this rule group. */
489         lpm->rule_info[depth - 1].used_rules++;
490
491         return rule_index;
492 }
493
494 static inline int32_t
495 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
496         uint32_t next_hop)
497 {
498         uint32_t rule_gindex, rule_index, last_rule;
499         int i;
500
501         VERIFY_DEPTH(depth);
502
503         /* Scan through rule group to see if rule already exists. */
504         if (lpm->rule_info[depth - 1].used_rules > 0) {
505
506                 /* rule_gindex stands for rule group index. */
507                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
508                 /* Initialise rule_index to point to start of rule group. */
509                 rule_index = rule_gindex;
510                 /* Last rule = Last used rule in this rule group. */
511                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
512
513                 for (; rule_index < last_rule; rule_index++) {
514
515                         /* If rule already exists update its next_hop and return. */
516                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
517                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
518
519                                 return rule_index;
520                         }
521                 }
522
523                 if (rule_index == lpm->max_rules)
524                         return -ENOSPC;
525         } else {
526                 /* Calculate the position in which the rule will be stored. */
527                 rule_index = 0;
528
529                 for (i = depth - 1; i > 0; i--) {
530                         if (lpm->rule_info[i - 1].used_rules > 0) {
531                                 rule_index = lpm->rule_info[i - 1].first_rule
532                                                 + lpm->rule_info[i - 1].used_rules;
533                                 break;
534                         }
535                 }
536                 if (rule_index == lpm->max_rules)
537                         return -ENOSPC;
538
539                 lpm->rule_info[depth - 1].first_rule = rule_index;
540         }
541
542         /* Make room for the new rule in the array. */
543         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
544                 if (lpm->rule_info[i - 1].first_rule
545                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
546                         return -ENOSPC;
547
548                 if (lpm->rule_info[i - 1].used_rules > 0) {
549                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
550                                 + lpm->rule_info[i - 1].used_rules]
551                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
552                         lpm->rule_info[i - 1].first_rule++;
553                 }
554         }
555
556         /* Add the new rule. */
557         lpm->rules_tbl[rule_index].ip = ip_masked;
558         lpm->rules_tbl[rule_index].next_hop = next_hop;
559
560         /* Increment the used rules counter for this rule group. */
561         lpm->rule_info[depth - 1].used_rules++;
562
563         return rule_index;
564 }
565
566 /*
567  * Delete a rule from the rule table.
568  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
569  */
570 static inline void
571 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
572 {
573         int i;
574
575         VERIFY_DEPTH(depth);
576
577         lpm->rules_tbl[rule_index] =
578                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
579                                 + lpm->rule_info[depth - 1].used_rules - 1];
580
581         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
582                 if (lpm->rule_info[i].used_rules > 0) {
583                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
584                                 lpm->rules_tbl[lpm->rule_info[i].first_rule
585                                         + lpm->rule_info[i].used_rules - 1];
586                         lpm->rule_info[i].first_rule--;
587                 }
588         }
589
590         lpm->rule_info[depth - 1].used_rules--;
591 }
592
593 static inline void
594 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
595 {
596         int i;
597
598         VERIFY_DEPTH(depth);
599
600         lpm->rules_tbl[rule_index] =
601                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
602                         + lpm->rule_info[depth - 1].used_rules - 1];
603
604         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
605                 if (lpm->rule_info[i].used_rules > 0) {
606                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
607                                         lpm->rules_tbl[lpm->rule_info[i].first_rule
608                                                 + lpm->rule_info[i].used_rules - 1];
609                         lpm->rule_info[i].first_rule--;
610                 }
611         }
612
613         lpm->rule_info[depth - 1].used_rules--;
614 }
615
616 /*
617  * Finds a rule in rule table.
618  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
619  */
620 static inline int32_t
621 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
622 {
623         uint32_t rule_gindex, last_rule, rule_index;
624
625         VERIFY_DEPTH(depth);
626
627         rule_gindex = lpm->rule_info[depth - 1].first_rule;
628         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
629
630         /* Scan used rules at given depth to find rule. */
631         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
632                 /* If rule is found return the rule index. */
633                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
634                         return rule_index;
635         }
636
637         /* If rule is not found return -EINVAL. */
638         return -EINVAL;
639 }
640
641 static inline int32_t
642 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
643 {
644         uint32_t rule_gindex, last_rule, rule_index;
645
646         VERIFY_DEPTH(depth);
647
648         rule_gindex = lpm->rule_info[depth - 1].first_rule;
649         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
650
651         /* Scan used rules at given depth to find rule. */
652         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
653                 /* If rule is found return the rule index. */
654                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
655                         return rule_index;
656         }
657
658         /* If rule is not found return -EINVAL. */
659         return -EINVAL;
660 }
661
662 /*
663  * Find, clean and allocate a tbl8.
664  */
665 static inline int32_t
666 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
667 {
668         uint32_t group_idx; /* tbl8 group index. */
669         struct rte_lpm_tbl_entry_v20 *tbl8_entry;
670
671         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
672         for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
673                         group_idx++) {
674                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
675                 /* If a free tbl8 group is found clean it and set as VALID. */
676                 if (!tbl8_entry->valid_group) {
677                         memset(&tbl8_entry[0], 0,
678                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
679                                         sizeof(tbl8_entry[0]));
680
681                         tbl8_entry->valid_group = VALID;
682
683                         /* Return group index for allocated tbl8 group. */
684                         return group_idx;
685                 }
686         }
687
688         /* If there are no tbl8 groups free then return error. */
689         return -ENOSPC;
690 }
691
692 static inline int32_t
693 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
694 {
695         uint32_t group_idx; /* tbl8 group index. */
696         struct rte_lpm_tbl_entry *tbl8_entry;
697
698         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
699         for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
700                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
701                 /* If a free tbl8 group is found clean it and set as VALID. */
702                 if (!tbl8_entry->valid_group) {
703                         memset(&tbl8_entry[0], 0,
704                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
705                                         sizeof(tbl8_entry[0]));
706
707                         tbl8_entry->valid_group = VALID;
708
709                         /* Return group index for allocated tbl8 group. */
710                         return group_idx;
711                 }
712         }
713
714         /* If there are no tbl8 groups free then return error. */
715         return -ENOSPC;
716 }
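/*
 * Note (added for clarity): allocation is a linear scan over the groups.
 * The first entry of each 256-entry tbl8 group doubles as the group's
 * "allocated" flag, so group g is free while
 * tbl8[g * RTE_LPM_TBL8_GROUP_NUM_ENTRIES].valid_group == 0, and finding a
 * free group is O(number_tbl8s) in the worst case.
 */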
717
718 static inline void
719 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
720 {
721         /* Set tbl8 group invalid */
722         tbl8[tbl8_group_start].valid_group = INVALID;
723 }
724
725 static inline void
726 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
727 {
728         /* Set tbl8 group invalid */
729         tbl8[tbl8_group_start].valid_group = INVALID;
730 }
731
732 static inline int32_t
733 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
734                 uint8_t next_hop)
735 {
736         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
737
738         /* Calculate the index into Table24. */
739         tbl24_index = ip >> 8;
740         tbl24_range = depth_to_range(depth);
741
742         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
743                 /*
744                  * Set the tbl24 entry if it is invalid, OR if it is valid,
745                  * non-extended and its depth is no greater than the new one.
746                  */
747                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
748                                 lpm->tbl24[i].depth <= depth)) {
749
750                         struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
751                                 { .next_hop = next_hop, },
752                                 .valid = VALID,
753                                 .valid_group = 0,
754                                 .depth = depth,
755                         };
756
757                         /* Setting tbl24 entry in one go to avoid race
758                          * conditions
759                          */
760                         lpm->tbl24[i] = new_tbl24_entry;
761
762                         continue;
763                 }
764
765                 if (lpm->tbl24[i].valid_group == 1) {
766                         /* If tbl24 entry is valid and extended calculate the
767                          *  index into tbl8.
768                          */
769                         tbl8_index = lpm->tbl24[i].group_idx *
770                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
771                         tbl8_group_end = tbl8_index +
772                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
773
774                         for (j = tbl8_index; j < tbl8_group_end; j++) {
775                                 if (!lpm->tbl8[j].valid ||
776                                                 lpm->tbl8[j].depth <= depth) {
777                                         struct rte_lpm_tbl_entry_v20
778                                                 new_tbl8_entry = {
779                                                 .valid = VALID,
780                                                 .valid_group = VALID,
781                                                 .depth = depth,
782                                                 .next_hop = next_hop,
783                                         };
784
785                                         /*
786                                          * Setting tbl8 entry in one go to avoid
787                                          * race conditions
788                                          */
789                                         lpm->tbl8[j] = new_tbl8_entry;
790
791                                         continue;
792                                 }
793                         }
794                 }
795         }
796
797         return 0;
798 }
799
800 static inline int32_t
801 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
802                 uint32_t next_hop)
803 {
804 #define group_idx next_hop
805         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
806
807         /* Calculate the index into Table24. */
808         tbl24_index = ip >> 8;
809         tbl24_range = depth_to_range(depth);
810
811         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
812                 /*
813                  * Set the tbl24 entry if it is invalid, OR if it is valid,
814                  * non-extended and its depth is no greater than the new one.
815                  */
816                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
817                                 lpm->tbl24[i].depth <= depth)) {
818
819                         struct rte_lpm_tbl_entry new_tbl24_entry = {
820                                 .next_hop = next_hop,
821                                 .valid = VALID,
822                                 .valid_group = 0,
823                                 .depth = depth,
824                         };
825
826                         /* Setting tbl24 entry in one go to avoid race
827                          * conditions
828                          */
829                         lpm->tbl24[i] = new_tbl24_entry;
830
831                         continue;
832                 }
833
834                 if (lpm->tbl24[i].valid_group == 1) {
835                         /* If tbl24 entry is valid and extended calculate the
836                          *  index into tbl8.
837                          */
838                         tbl8_index = lpm->tbl24[i].group_idx *
839                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
840                         tbl8_group_end = tbl8_index +
841                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
842
843                         for (j = tbl8_index; j < tbl8_group_end; j++) {
844                                 if (!lpm->tbl8[j].valid ||
845                                                 lpm->tbl8[j].depth <= depth) {
846                                         struct rte_lpm_tbl_entry
847                                                 new_tbl8_entry = {
848                                                 .valid = VALID,
849                                                 .valid_group = VALID,
850                                                 .depth = depth,
851                                                 .next_hop = next_hop,
852                                         };
853
854                                         /*
855                                          * Setting tbl8 entry in one go to avoid
856                                          * race conditions
857                                          */
858                                         lpm->tbl8[j] = new_tbl8_entry;
859
860                                         continue;
861                                 }
862                         }
863                 }
864         }
865 #undef group_idx
866         return 0;
867 }
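/*
 * Worked example (illustrative): adding 10.1.0.0/22 gives
 * tbl24_index = 0x0A010000 >> 8 = 0x0A0100 and tbl24_range = 4, so tbl24
 * entries 0x0A0100 - 0x0A0103 are candidates. Each one is overwritten only
 * if it is invalid or currently holds a prefix no more specific than /22;
 * entries already extended into a tbl8 group instead have the matching
 * tbl8 entries updated under the same depth rule.
 */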
868
869 static inline int32_t
870 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
871                 uint8_t next_hop)
872 {
873         uint32_t tbl24_index;
874         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
875                 tbl8_range, i;
876
877         tbl24_index = (ip_masked >> 8);
878         tbl8_range = depth_to_range(depth);
879
880         if (!lpm->tbl24[tbl24_index].valid) {
881                 /* Search for a free tbl8 group. */
882                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
883
884                 /* Check tbl8 allocation was successful. */
885                 if (tbl8_group_index < 0) {
886                         return tbl8_group_index;
887                 }
888
889                 /* Find index into tbl8 and range. */
890                 tbl8_index = (tbl8_group_index *
891                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
892                                 (ip_masked & 0xFF);
893
894                 /* Set tbl8 entry. */
895                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
896                         lpm->tbl8[i].depth = depth;
897                         lpm->tbl8[i].next_hop = next_hop;
898                         lpm->tbl8[i].valid = VALID;
899                 }
900
901                 /*
902                  * Update tbl24 entry to point to new tbl8 entry. Note: The
903                  * ext_flag and tbl8_index need to be updated simultaneously,
904                  * so assign whole structure in one go
905                  */
906
907                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
908                         { .group_idx = (uint8_t)tbl8_group_index, },
909                         .valid = VALID,
910                         .valid_group = 1,
911                         .depth = 0,
912                 };
913
914                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
915
916         } /* If valid entry but not extended calculate the index into Table8. */
917         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
918                 /* Search for free tbl8 group. */
919                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
920
921                 if (tbl8_group_index < 0) {
922                         return tbl8_group_index;
923                 }
924
925                 tbl8_group_start = tbl8_group_index *
926                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
927                 tbl8_group_end = tbl8_group_start +
928                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
929
930                 /* Populate new tbl8 with tbl24 value. */
931                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
932                         lpm->tbl8[i].valid = VALID;
933                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
934                         lpm->tbl8[i].next_hop =
935                                         lpm->tbl24[tbl24_index].next_hop;
936                 }
937
938                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
939
940                 /* Insert new rule into the tbl8 entry. */
941                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
942                         if (!lpm->tbl8[i].valid ||
943                                         lpm->tbl8[i].depth <= depth) {
944                                 lpm->tbl8[i].valid = VALID;
945                                 lpm->tbl8[i].depth = depth;
946                                 lpm->tbl8[i].next_hop = next_hop;
947
948                                 continue;
949                         }
950                 }
951
952                 /*
953                  * Update tbl24 entry to point to new tbl8 entry. Note: The
954                  * ext_flag and tbl8_index need to be updated simultaneously,
955                  * so assign whole structure in one go.
956                  */
957
958                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
959                                 { .group_idx = (uint8_t)tbl8_group_index, },
960                                 .valid = VALID,
961                                 .valid_group = 1,
962                                 .depth = 0,
963                 };
964
965                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
966
967         } else { /*
968                 * The entry is valid and extended: calculate the index into tbl8.
969                 */
970                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
971                 tbl8_group_start = tbl8_group_index *
972                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
973                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
974
975                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
976
977                         if (!lpm->tbl8[i].valid ||
978                                         lpm->tbl8[i].depth <= depth) {
979                                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
980                                         .valid = VALID,
981                                         .depth = depth,
982                                         .next_hop = next_hop,
983                                         .valid_group = lpm->tbl8[i].valid_group,
984                                 };
985
986                                 /*
987                                  * Setting tbl8 entry in one go to avoid race
988                                  * condition
989                                  */
990                                 lpm->tbl8[i] = new_tbl8_entry;
991
992                                 continue;
993                         }
994                 }
995         }
996
997         return 0;
998 }
999
1000 static inline int32_t
1001 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
1002                 uint32_t next_hop)
1003 {
1004 #define group_idx next_hop
1005         uint32_t tbl24_index;
1006         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
1007                 tbl8_range, i;
1008
1009         tbl24_index = (ip_masked >> 8);
1010         tbl8_range = depth_to_range(depth);
1011
1012         if (!lpm->tbl24[tbl24_index].valid) {
1013                 /* Search for a free tbl8 group. */
1014                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1015
1016                 /* Check tbl8 allocation was successful. */
1017                 if (tbl8_group_index < 0) {
1018                         return tbl8_group_index;
1019                 }
1020
1021                 /* Find index into tbl8 and range. */
1022                 tbl8_index = (tbl8_group_index *
1023                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1024                                 (ip_masked & 0xFF);
1025
1026                 /* Set tbl8 entry. */
1027                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1028                         lpm->tbl8[i].depth = depth;
1029                         lpm->tbl8[i].next_hop = next_hop;
1030                         lpm->tbl8[i].valid = VALID;
1031                 }
1032
1033                 /*
1034                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1035                  * ext_flag and tbl8_index need to be updated simultaneously,
1036                  * so assign whole structure in one go
1037                  */
1038
1039                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1040                         .group_idx = (uint8_t)tbl8_group_index,
1041                         .valid = VALID,
1042                         .valid_group = 1,
1043                         .depth = 0,
1044                 };
1045
1046                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1047
1048         } /* If valid entry but not extended calculate the index into Table8. */
1049         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1050                 /* Search for free tbl8 group. */
1051                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1052
1053                 if (tbl8_group_index < 0) {
1054                         return tbl8_group_index;
1055                 }
1056
1057                 tbl8_group_start = tbl8_group_index *
1058                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1059                 tbl8_group_end = tbl8_group_start +
1060                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1061
1062                 /* Populate new tbl8 with tbl24 value. */
1063                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1064                         lpm->tbl8[i].valid = VALID;
1065                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
1066                         lpm->tbl8[i].next_hop =
1067                                         lpm->tbl24[tbl24_index].next_hop;
1068                 }
1069
1070                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1071
1072                 /* Insert new rule into the tbl8 entry. */
1073                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1074                         if (!lpm->tbl8[i].valid ||
1075                                         lpm->tbl8[i].depth <= depth) {
1076                                 lpm->tbl8[i].valid = VALID;
1077                                 lpm->tbl8[i].depth = depth;
1078                                 lpm->tbl8[i].next_hop = next_hop;
1079
1080                                 continue;
1081                         }
1082                 }
1083
1084                 /*
1085                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1086                  * ext_flag and tbl8_index need to be updated simultaneously,
1087                  * so assign whole structure in one go.
1088                  */
1089
1090                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1091                                 .group_idx = (uint8_t)tbl8_group_index,
1092                                 .valid = VALID,
1093                                 .valid_group = 1,
1094                                 .depth = 0,
1095                 };
1096
1097                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1098
1099         } else { /*
1100                 * The entry is valid and extended: calculate the index into tbl8.
1101                 */
1102                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1103                 tbl8_group_start = tbl8_group_index *
1104                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1105                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1106
1107                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1108
1109                         if (!lpm->tbl8[i].valid ||
1110                                         lpm->tbl8[i].depth <= depth) {
1111                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1112                                         .valid = VALID,
1113                                         .depth = depth,
1114                                         .next_hop = next_hop,
1115                                         .valid_group = lpm->tbl8[i].valid_group,
1116                                 };
1117
1118                                 /*
1119                                  * Setting tbl8 entry in one go to avoid race
1120                                  * condition
1121                                  */
1122                                 lpm->tbl8[i] = new_tbl8_entry;
1123
1124                                 continue;
1125                         }
1126                 }
1127         }
1128 #undef group_idx
1129         return 0;
1130 }
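/*
 * Worked example (illustrative): adding 10.1.1.16/28 gives
 * tbl24_index = 0x0A0101 and tbl8_range = 16. If that tbl24 entry is not
 * yet extended, a free tbl8 group is allocated, seeded with the previous
 * tbl24 next hop (if any), the sixteen entries covering .16 - .31 are
 * written, and the tbl24 entry is then switched to point at the group with
 * a single structure assignment.
 */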
1131
1132 /*
1133  * Add a route
1134  */
1135 int
1136 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1137                 uint8_t next_hop)
1138 {
1139         int32_t rule_index, status = 0;
1140         uint32_t ip_masked;
1141
1142         /* Check user arguments. */
1143         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1144                 return -EINVAL;
1145
1146         ip_masked = ip & depth_to_mask(depth);
1147
1148         /* Add the rule to the rule table. */
1149         rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
1150
1151         /* If there is no space available for the new rule, return an error. */
1152         if (rule_index < 0) {
1153                 return rule_index;
1154         }
1155
1156         if (depth <= MAX_DEPTH_TBL24) {
1157                 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
1158         } else { /* If depth > MAX_DEPTH_TBL24 */
1159                 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
1160
1161                 /*
1162                  * If add fails due to exhaustion of tbl8 extensions delete
1163                  * rule that was added to rule table.
1164                  */
1165                 if (status < 0) {
1166                         rule_delete_v20(lpm, rule_index, depth);
1167
1168                         return status;
1169                 }
1170         }
1171
1172         return 0;
1173 }
1174 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1175
1176 int
1177 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1178                 uint32_t next_hop)
1179 {
1180         int32_t rule_index, status = 0;
1181         uint32_t ip_masked;
1182
1183         /* Check user arguments. */
1184         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1185                 return -EINVAL;
1186
1187         ip_masked = ip & depth_to_mask(depth);
1188
1189         /* Add the rule to the rule table. */
1190         rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
1191
1192         /* If there is no space available for the new rule, return an error. */
1193         if (rule_index < 0) {
1194                 return rule_index;
1195         }
1196
1197         if (depth <= MAX_DEPTH_TBL24) {
1198                 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
1199         } else { /* If depth > MAX_DEPTH_TBL24 */
1200                 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
1201
1202                 /*
1203                  * If add fails due to exhaustion of tbl8 extensions delete
1204                  * rule that was added to rule table.
1205                  */
1206                 if (status < 0) {
1207                         rule_delete_v1604(lpm, rule_index, depth);
1208
1209                         return status;
1210                 }
1211         }
1212
1213         return 0;
1214 }
1215 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1216 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1217                 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
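/*
 * Usage sketch (illustrative only; the prefix and next hop are made-up
 * values, and the IPv4 address is expected in host byte order, e.g. after
 * rte_be_to_cpu_32() on a value read from a packet header):
 *
 *   uint32_t ip = 0xC0A80100;            (192.168.1.0)
 *   int ret = rte_lpm_add(lpm, ip, 24, 5);
 *   if (ret < 0)
 *           RTE_LOG(ERR, LPM, "route add failed: %d\n", ret);
 */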
1218
1219 /*
1220  * Look for a rule in the high-level rules table
1221  */
1222 int
1223 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1224 uint8_t *next_hop)
1225 {
1226         uint32_t ip_masked;
1227         int32_t rule_index;
1228
1229         /* Check user arguments. */
1230         if ((lpm == NULL) ||
1231                 (next_hop == NULL) ||
1232                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1233                 return -EINVAL;
1234
1235         /* Look for the rule using rule_find. */
1236         ip_masked = ip & depth_to_mask(depth);
1237         rule_index = rule_find_v20(lpm, ip_masked, depth);
1238
1239         if (rule_index >= 0) {
1240                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1241                 return 1;
1242         }
1243
1244         /* If rule is not found return 0. */
1245         return 0;
1246 }
1247 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1248
1249 int
1250 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1251 uint32_t *next_hop)
1252 {
1253         uint32_t ip_masked;
1254         int32_t rule_index;
1255
1256         /* Check user arguments. */
1257         if ((lpm == NULL) ||
1258                 (next_hop == NULL) ||
1259                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1260                 return -EINVAL;
1261
1262         /* Look for the rule using rule_find. */
1263         ip_masked = ip & depth_to_mask(depth);
1264         rule_index = rule_find_v1604(lpm, ip_masked, depth);
1265
1266         if (rule_index >= 0) {
1267                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1268                 return 1;
1269         }
1270
1271         /* If rule is not found return 0. */
1272         return 0;
1273 }
1274 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1275 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1276                 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
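/*
 * Usage sketch (illustrative): note that rte_lpm_is_rule_present() only
 * searches the rule table at the exact depth given, it does not perform a
 * longest-prefix-match lookup.
 *
 *   uint32_t next_hop;
 *
 *   if (rte_lpm_is_rule_present(lpm, ip, 24, &next_hop) == 1)
 *           RTE_LOG(INFO, LPM, "rule found, next hop %u\n", next_hop);
 */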
1277
1278 static inline int32_t
1279 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1280                 uint8_t *sub_rule_depth)
1281 {
1282         int32_t rule_index;
1283         uint32_t ip_masked;
1284         uint8_t prev_depth;
1285
1286         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1287                 ip_masked = ip & depth_to_mask(prev_depth);
1288
1289                 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1290
1291                 if (rule_index >= 0) {
1292                         *sub_rule_depth = prev_depth;
1293                         return rule_index;
1294                 }
1295         }
1296
1297         return -1;
1298 }
1299
1300 static inline int32_t
1301 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1302                 uint8_t *sub_rule_depth)
1303 {
1304         int32_t rule_index;
1305         uint32_t ip_masked;
1306         uint8_t prev_depth;
1307
1308         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1309                 ip_masked = ip & depth_to_mask(prev_depth);
1310
1311                 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1312
1313                 if (rule_index >= 0) {
1314                         *sub_rule_depth = prev_depth;
1315                         return rule_index;
1316                 }
1317         }
1318
1319         return -1;
1320 }
1321
1322 static inline int32_t
1323 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1324         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1325 {
1326         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1327
1328         /* Calculate the range and index into Table24. */
1329         tbl24_range = depth_to_range(depth);
1330         tbl24_index = (ip_masked >> 8);
1331
1332         /*
1333          * First check the sub_rule_index. A -1 indicates no replacement rule,
1334          * while a value of 0 or greater is a valid sub_rule_index.
1335          */
1336         if (sub_rule_index < 0) {
1337                 /*
1338                  * If no replacement rule exists then invalidate entries
1339                  * associated with this rule.
1340                  */
1341                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1342
1343                         if (lpm->tbl24[i].valid_group == 0 &&
1344                                         lpm->tbl24[i].depth <= depth) {
1345                                 lpm->tbl24[i].valid = INVALID;
1346                         } else if (lpm->tbl24[i].valid_group == 1) {
1347                                 /*
1348                                  * If TBL24 entry is extended, then there has
1349                                  * to be a rule with depth >= 25 in the
1350                                  * associated TBL8 group.
1351                                  */
1352
1353                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1354                                 tbl8_index = tbl8_group_index *
1355                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1356
1357                                 for (j = tbl8_index; j < (tbl8_index +
1358                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1359
1360                                         if (lpm->tbl8[j].depth <= depth)
1361                                                 lpm->tbl8[j].valid = INVALID;
1362                                 }
1363                         }
1364                 }
1365         } else {
1366                 /*
1367                  * If a replacement rule exists then modify entries
1368                  * associated with this rule.
1369                  */
1370
1371                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1372                         {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
1373                         .valid = VALID,
1374                         .valid_group = 0,
1375                         .depth = sub_rule_depth,
1376                 };
1377
1378                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1379                         .valid = VALID,
1380                         .valid_group = VALID,
1381                         .depth = sub_rule_depth,
1382                         .next_hop = lpm->rules_tbl
1383                         [sub_rule_index].next_hop,
1384                 };
1385
1386                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1387
1388                         if (lpm->tbl24[i].valid_group == 0 &&
1389                                         lpm->tbl24[i].depth <= depth) {
1390                                 lpm->tbl24[i] = new_tbl24_entry;
1391                         } else  if (lpm->tbl24[i].valid_group == 1) {
1392                                 /*
1393                                  * If TBL24 entry is extended, then there has
1394                                  * to be a rule with depth >= 25 in the
1395                                  * associated TBL8 group.
1396                                  */
1397
1398                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1399                                 tbl8_index = tbl8_group_index *
1400                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1401
1402                                 for (j = tbl8_index; j < (tbl8_index +
1403                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1404
1405                                         if (lpm->tbl8[j].depth <= depth)
1406                                                 lpm->tbl8[j] = new_tbl8_entry;
1407                                 }
1408                         }
1409                 }
1410         }
1411
1412         return 0;
1413 }
1414
1415 static inline int32_t
1416 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1417         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1418 {
1419 #define group_idx next_hop
1420         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1421
1422         /* Calculate the range and index into Table24. */
1423         tbl24_range = depth_to_range(depth);
1424         tbl24_index = (ip_masked >> 8);
1425
1426         /*
1427          * First check sub_rule_index. A value of -1 means there is no replacement
1428          * rule, while a non-negative value is the index of the replacement rule.
1429          */
1430         if (sub_rule_index < 0) {
1431                 /*
1432                  * If no replacement rule exists then invalidate entries
1433                  * associated with this rule.
1434                  */
1435                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1436
1437                         if (lpm->tbl24[i].valid_group == 0 &&
1438                                         lpm->tbl24[i].depth <= depth) {
1439                                 lpm->tbl24[i].valid = INVALID;
1440                         } else if (lpm->tbl24[i].valid_group == 1) {
1441                                 /*
1442                                  * If TBL24 entry is extended, then there has
1443                                  * to be a rule with depth >= 25 in the
1444                                  * associated TBL8 group.
1445                                  */
1446
1447                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1448                                 tbl8_index = tbl8_group_index *
1449                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1450
1451                                 for (j = tbl8_index; j < (tbl8_index +
1452                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1453
1454                                         if (lpm->tbl8[j].depth <= depth)
1455                                                 lpm->tbl8[j].valid = INVALID;
1456                                 }
1457                         }
1458                 }
1459         } else {
1460                 /*
1461                  * If a replacement rule exists then modify entries
1462                  * associated with this rule.
1463                  */
1464
1465                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1466                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1467                         .valid = VALID,
1468                         .valid_group = 0,
1469                         .depth = sub_rule_depth,
1470                 };
1471
1472                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1473                         .valid = VALID,
1474                         .valid_group = VALID,
1475                         .depth = sub_rule_depth,
1476                         .next_hop = lpm->rules_tbl
1477                         [sub_rule_index].next_hop,
1478                 };
1479
1480                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1481
1482                         if (lpm->tbl24[i].valid_group == 0 &&
1483                                         lpm->tbl24[i].depth <= depth) {
1484                                 lpm->tbl24[i] = new_tbl24_entry;
1485                         } else  if (lpm->tbl24[i].valid_group == 1) {
1486                                 /*
1487                                  * If TBL24 entry is extended, then there has
1488                                  * to be a rule with depth >= 25 in the
1489                                  * associated TBL8 group.
1490                                  */
1491
1492                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1493                                 tbl8_index = tbl8_group_index *
1494                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1495
1496                                 for (j = tbl8_index; j < (tbl8_index +
1497                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1498
1499                                         if (lpm->tbl8[j].depth <= depth)
1500                                                 lpm->tbl8[j] = new_tbl8_entry;
1501                                 }
1502                         }
1503                 }
1504         }
1505 #undef group_idx
1506         return 0;
1507 }
1508
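/*
 * Illustrative sketch, not part of the library: how a masked prefix maps to a
 * tbl24 index and range in the shallow-delete path above. The guard macro
 * LPM_DOC_EXAMPLES and the helper function are hypothetical and exist only to
 * document the arithmetic.
 */
#ifdef LPM_DOC_EXAMPLES
#include <assert.h>

static void
lpm_doc_tbl24_range_example(void)
{
	/* 192.168.0.0/16, already masked to its depth. */
	uint32_t ip_masked = (192u << 24) | (168u << 16);
	uint8_t depth = 16;

	/* Same arithmetic as delete_depth_small_v1604(). */
	uint32_t tbl24_index = ip_masked >> 8;
	uint32_t tbl24_range = depth_to_range(depth);

	/* A /16 spans 2^(24 - 16) = 256 consecutive tbl24 entries. */
	assert(tbl24_range == 256);
	/* tbl24_index is the upper 24 bits of the address. */
	assert(tbl24_index == 0xC0A800);
}
#endif /* LPM_DOC_EXAMPLES */
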
1509 /*
1510  * Checks if table 8 group can be recycled.
1511  *
1512  * A return of -EEXIST means the tbl8 is in use and thus cannot be recycled.
1513  * A return of -EINVAL means the tbl8 is empty and can therefore be freed.
1514  * A non-negative return value means every entry in the tbl8 shares one depth
1515  * and next hop, so the group can be collapsed into a tbl24 entry and recycled.
1516  */
1517 static inline int32_t
1518 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1519                 uint32_t tbl8_group_start)
1520 {
1521         uint32_t tbl8_group_end, i;
1522         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1523
1524         /*
1525          * Check the first entry of the given tbl8. If it is invalid, the group
1526          * cannot contain a rule shallow enough (depth <= MAX_DEPTH_TBL24) to
1527          * cover every entry, so it cannot be collapsed into a single tbl24
1528          * entry; it can only be freed once all of its entries are invalid.
1529          */
1530         if (tbl8[tbl8_group_start].valid) {
1531                 /*
1532                  * If the first entry is valid, check whether its depth is less
1533                  * than 24; if so, verify that every other entry in the group
1534                  * has that same depth.
1535                  */
1536                 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1537                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1538                                         i++) {
1539
1540                                 if (tbl8[i].depth !=
1541                                                 tbl8[tbl8_group_start].depth) {
1542
1543                                         return -EEXIST;
1544                                 }
1545                         }
1546                         /* If all entries are the same, return the tbl8 index. */
1547                         return tbl8_group_start;
1548                 }
1549
1550                 return -EEXIST;
1551         }
1552         /*
1553          * If the first entry is invalid check if the rest of the entries in
1554          * the tbl8 are invalid.
1555          */
1556         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1557                 if (tbl8[i].valid)
1558                         return -EEXIST;
1559         }
1560         /* If no valid entries are found then return -EINVAL. */
1561         return -EINVAL;
1562 }
1563
1564 static inline int32_t
1565 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1566                 uint32_t tbl8_group_start)
1567 {
1568         uint32_t tbl8_group_end, i;
1569         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1570
1571         /*
1572          * Check the first entry of the given tbl8. If it is invalid, the group
1573          * cannot contain a rule shallow enough (depth <= MAX_DEPTH_TBL24) to
1574          * cover every entry, so it cannot be collapsed into a single tbl24
1575          * entry; it can only be freed once all of its entries are invalid.
1576          */
1577         if (tbl8[tbl8_group_start].valid) {
1578                 /*
1579                  * If the first entry is valid, check whether its depth is less
1580                  * than 24; if so, verify that every other entry in the group
1581                  * has that same depth.
1582                  */
1583                 if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
1584                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1585                                         i++) {
1586
1587                                 if (tbl8[i].depth !=
1588                                                 tbl8[tbl8_group_start].depth) {
1589
1590                                         return -EEXIST;
1591                                 }
1592                         }
1593                         /* If all entries are the same, return the tbl8 index. */
1594                         return tbl8_group_start;
1595                 }
1596
1597                 return -EEXIST;
1598         }
1599         /*
1600          * If the first entry is invalid check if the rest of the entries in
1601          * the tbl8 are invalid.
1602          */
1603         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1604                 if (tbl8[i].valid)
1605                         return -EEXIST;
1606         }
1607         /* If no valid entries are found then return -EINVAL. */
1608         return -EINVAL;
1609 }
1610
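/*
 * Illustrative sketch, not part of the library: how a caller acts on the three
 * possible outcomes of tbl8_recycle_check_v1604(). The guard macro
 * LPM_DOC_EXAMPLES is hypothetical; delete_depth_big_v1604() below performs
 * the same classification on the live tables.
 */
#ifdef LPM_DOC_EXAMPLES
static void
lpm_doc_recycle_example(struct rte_lpm *lpm, uint32_t tbl8_group_start)
{
	int32_t idx = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (idx == -EINVAL) {
		/* The group is empty: invalidate its tbl24 entry, then free it. */
	} else if (idx == -EEXIST) {
		/* The group is still in use and cannot be collapsed: keep it. */
	} else {
		/*
		 * idx >= 0: every entry matches lpm->tbl8[idx], so the group can
		 * be folded back into a single tbl24 entry and then freed.
		 */
	}
}
#endif /* LPM_DOC_EXAMPLES */
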
1611 static inline int32_t
1612 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1613         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1614 {
1615         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1616                         tbl8_range, i;
1617         int32_t tbl8_recycle_index;
1618
1619         /*
1620          * Calculate the index into tbl24 and range. Note: All depths larger
1621          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1622          */
1623         tbl24_index = ip_masked >> 8;
1624
1625         /* Calculate the index into tbl8 and range. */
1626         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1627         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1628         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1629         tbl8_range = depth_to_range(depth);
1630
1631         if (sub_rule_index < 0) {
1632                 /*
1633                  * Loop through the range of entries on tbl8 for which the
1634                  * rule_to_delete must be removed or modified.
1635                  */
1636                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1637                         if (lpm->tbl8[i].depth <= depth)
1638                                 lpm->tbl8[i].valid = INVALID;
1639                 }
1640         } else {
1641                 /* Set new tbl8 entry. */
1642                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1643                         .valid = VALID,
1644                         .depth = sub_rule_depth,
1645                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1646                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1647                 };
1648
1649                 /*
1650                  * Loop through the range of entries on tbl8 for which the
1651                  * rule_to_delete must be modified.
1652                  */
1653                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1654                         if (lpm->tbl8[i].depth <= depth)
1655                                 lpm->tbl8[i] = new_tbl8_entry;
1656                 }
1657         }
1658
1659         /*
1660          * Check if there are any valid entries in this tbl8 group. If all
1661          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1662          * associated tbl24 entry.
1663          */
1664
1665         tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1666
1667         if (tbl8_recycle_index == -EINVAL) {
1668                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1669                 lpm->tbl24[tbl24_index].valid = 0;
1670                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1671         } else if (tbl8_recycle_index > -1) {
1672                 /* Update tbl24 entry. */
1673                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1674                         { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
1675                         .valid = VALID,
1676                         .valid_group = 0,
1677                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1678                 };
1679
1680                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1681                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1682                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1683         }
1684
1685         return 0;
1686 }
1687
1688 static inline int32_t
1689 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1690         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1691 {
1692 #define group_idx next_hop
1693         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1694                         tbl8_range, i;
1695         int32_t tbl8_recycle_index;
1696
1697         /*
1698          * Calculate the index into tbl24 and range. Note: All depths larger
1699          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1700          */
1701         tbl24_index = ip_masked >> 8;
1702
1703         /* Calculate the index into tbl8 and range. */
1704         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1705         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1706         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1707         tbl8_range = depth_to_range(depth);
1708
1709         if (sub_rule_index < 0) {
1710                 /*
1711                  * Loop through the range of entries on tbl8 for which the
1712                  * rule_to_delete must be removed or modified.
1713                  */
1714                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1715                         if (lpm->tbl8[i].depth <= depth)
1716                                 lpm->tbl8[i].valid = INVALID;
1717                 }
1718         } else {
1719                 /* Set new tbl8 entry. */
1720                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1721                         .valid = VALID,
1722                         .depth = sub_rule_depth,
1723                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1724                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1725                 };
1726
1727                 /*
1728                  * Loop through the range of entries on tbl8 for which the
1729                  * rule_to_delete must be modified.
1730                  */
1731                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1732                         if (lpm->tbl8[i].depth <= depth)
1733                                 lpm->tbl8[i] = new_tbl8_entry;
1734                 }
1735         }
1736
1737         /*
1738          * Check if there are any valid entries in this tbl8 group. If all
1739          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1740          * associated tbl24 entry.
1741          */
1742
1743         tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1744
1745         if (tbl8_recycle_index == -EINVAL) {
1746                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1747                 lpm->tbl24[tbl24_index].valid = 0;
1748                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1749         } else if (tbl8_recycle_index > -1) {
1750                 /* Update tbl24 entry. */
1751                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1752                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1753                         .valid = VALID,
1754                         .valid_group = 0,
1755                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1756                 };
1757
1758                 /* Set tbl24 before freeing tbl8 to avoid race condition. */
1759                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1760                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1761         }
1762 #undef group_idx
1763         return 0;
1764 }
1765
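/*
 * Illustrative sketch, not part of the library: the update ordering that the
 * "Set tbl24 before freeing tbl8" comments above rely on. Readers performing
 * lockless lookups must never follow a group_idx into a tbl8 group that has
 * already been returned to the free pool, so the writer first detaches the
 * group from the tbl24 lookup path and only then releases it. The guard macro
 * LPM_DOC_EXAMPLES is hypothetical.
 */
#ifdef LPM_DOC_EXAMPLES
static void
lpm_doc_recycle_order_example(struct rte_lpm *lpm, uint32_t tbl24_index,
		uint32_t tbl8_group_start)
{
	/* 1. Detach the group from the lookup path. */
	lpm->tbl24[tbl24_index].valid = 0;

	/* 2. Only then mark the tbl8 group as reusable. */
	tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
}
#endif /* LPM_DOC_EXAMPLES */
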
1766 /*
1767  * Deletes a rule
1768  */
1769 int
1770 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1771 {
1772         int32_t rule_to_delete_index, sub_rule_index;
1773         uint32_t ip_masked;
1774         uint8_t sub_rule_depth;
1775         /*
1776          * Check input arguments. Note: the IP address is an unsigned 32-bit
1777          * integer, so any value is valid and it does not need to be checked.
1778          */
1779         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1780                 return -EINVAL;
1781         }
1782
1783         ip_masked = ip & depth_to_mask(depth);
1784
1785         /*
1786          * Find the index of the input rule, that needs to be deleted, in the
1787          * rule table.
1788          */
1789         rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1790
1791         /*
1792          * Check if rule_to_delete_index was found. If no rule was found the
1793          * function rule_find returns -EINVAL.
1794          */
1795         if (rule_to_delete_index < 0)
1796                 return -EINVAL;
1797
1798         /* Delete the rule from the rule table. */
1799         rule_delete_v20(lpm, rule_to_delete_index, depth);
1800
1801         /*
1802          * Find a rule to replace the rule_to_delete. If no replacement rule
1803          * exists, find_previous_rule_v20 returns -1 and the table entries
1804          * associated with this rule are invalidated.
1805          */
1806         sub_rule_depth = 0;
1807         sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1808
1809         /*
1810          * If the input depth value is 24 or less, use delete_depth_small;
1811          * otherwise use delete_depth_big.
1812          */
1813         if (depth <= MAX_DEPTH_TBL24) {
1814                 return delete_depth_small_v20(lpm, ip_masked, depth,
1815                                 sub_rule_index, sub_rule_depth);
1816         } else { /* If depth > MAX_DEPTH_TBL24 */
1817                 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1818                                 sub_rule_depth);
1819         }
1820 }
1821 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1822
1823 int
1824 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1825 {
1826         int32_t rule_to_delete_index, sub_rule_index;
1827         uint32_t ip_masked;
1828         uint8_t sub_rule_depth;
1829         /*
1830          * Check input arguments. Note: the IP address is an unsigned 32-bit
1831          * integer, so any value is valid and it does not need to be checked.
1832          */
1833         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1834                 return -EINVAL;
1835         }
1836
1837         ip_masked = ip & depth_to_mask(depth);
1838
1839         /*
1840          * Find the index of the input rule, that needs to be deleted, in the
1841          * rule table.
1842          */
1843         rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1844
1845         /*
1846          * Check if rule_to_delete_index was found. If no rule was found the
1847          * function rule_find returns -EINVAL.
1848          */
1849         if (rule_to_delete_index < 0)
1850                 return -EINVAL;
1851
1852         /* Delete the rule from the rule table. */
1853         rule_delete_v1604(lpm, rule_to_delete_index, depth);
1854
1855         /*
1856          * Find a rule to replace the rule_to_delete. If no replacement rule
1857          * exists, find_previous_rule_v1604 returns -1 and the table entries
1858          * associated with this rule are invalidated.
1859          */
1860         sub_rule_depth = 0;
1861         sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1862
1863         /*
1864          * If the input depth value is 24 or less, use delete_depth_small;
1865          * otherwise use delete_depth_big.
1866          */
1867         if (depth <= MAX_DEPTH_TBL24) {
1868                 return delete_depth_small_v1604(lpm, ip_masked, depth,
1869                                 sub_rule_index, sub_rule_depth);
1870         } else { /* If depth > MAX_DEPTH_TBL24 */
1871                 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1872                                 sub_rule_depth);
1873         }
1874 }
1875 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1876 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1877                 uint8_t depth), rte_lpm_delete_v1604);
1878
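/*
 * Illustrative sketch, not part of the library: deleting a prefix through the
 * public rte_lpm_delete() entry point. The guard macro LPM_DOC_EXAMPLES and
 * the caller-supplied lpm handle are hypothetical.
 */
#ifdef LPM_DOC_EXAMPLES
static int
lpm_doc_delete_example(struct rte_lpm *lpm)
{
	/* 10.0.0.0/8; the address does not need to be pre-masked by the caller. */
	uint32_t ip = 10u << 24;
	uint8_t depth = 8;
	int ret;

	ret = rte_lpm_delete(lpm, ip, depth);
	if (ret < 0) {
		/* -EINVAL: bad arguments, or no matching rule in the table. */
		return ret;
	}

	return 0;
}
#endif /* LPM_DOC_EXAMPLES */
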
1879 /*
1880  * Delete all rules from the LPM table.
1881  */
1882 void
1883 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1884 {
1885         /* Zero rule information. */
1886         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1887
1888         /* Zero tbl24. */
1889         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1890
1891         /* Zero tbl8. */
1892         memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1893
1894         /* Delete all rules from the rules table. */
1895         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1896 }
1897 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1898
1899 void
1900 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1901 {
1902         /* Zero rule information. */
1903         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1904
1905         /* Zero tbl24. */
1906         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1907
1908         /* Zero tbl8. */
1909         memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1910                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1911
1912         /* Delete all rules from the rules table. */
1913         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1914 }
1915 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1916 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1917                 rte_lpm_delete_all_v1604);
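
/*
 * Illustrative sketch, not part of the library: clearing a table with
 * rte_lpm_delete_all() and then repopulating it with rte_lpm_add(). The guard
 * macro LPM_DOC_EXAMPLES and the example next hop value are hypothetical.
 */
#ifdef LPM_DOC_EXAMPLES
static int
lpm_doc_reset_example(struct rte_lpm *lpm)
{
	/* Clears rule_info, tbl24, tbl8 and the rules table in one call. */
	rte_lpm_delete_all(lpm);

	/* Reinstall 10.0.0.0/8 pointing at next hop 1. */
	return rte_lpm_add(lpm, 10u << 24, 8, 1);
}
#endif /* LPM_DOC_EXAMPLES */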