lpm: avoid race conditions for v20
[dpdk.git] / lib / librte_lpm / rte_lpm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <stdint.h>
7 #include <errno.h>
8 #include <stdarg.h>
9 #include <stdio.h>
10 #include <sys/queue.h>
11
12 #include <rte_log.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_common.h>
15 #include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_malloc.h>
17 #include <rte_eal.h>
18 #include <rte_eal_memconfig.h>
19 #include <rte_per_lcore.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_rwlock.h>
23 #include <rte_spinlock.h>
24 #include <rte_tailq.h>
25
26 #include "rte_lpm.h"
27
28 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
29
30 static struct rte_tailq_elem rte_lpm_tailq = {
31         .name = "RTE_LPM",
32 };
33 EAL_REGISTER_TAILQ(rte_lpm_tailq)
34
35 #define MAX_DEPTH_TBL24 24
36
37 enum valid_flag {
38         INVALID = 0,
39         VALID
40 };
41
42 /* Macro to enable/disable run-time checks. */
43 #if defined(RTE_LIBRTE_LPM_DEBUG)
44 #include <rte_debug.h>
45 #define VERIFY_DEPTH(depth) do {                                \
46         if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
47                 rte_panic("LPM: Invalid depth (%u) at line %d", \
48                                 (unsigned)(depth), __LINE__);   \
49 } while (0)
50 #else
51 #define VERIFY_DEPTH(depth)
52 #endif
53
54 /*
55  * Converts a given depth value to its corresponding mask value.
56  *
57  * depth  (IN)          : range = 1 - 32
58  * mask   (OUT)         : 32bit mask
59  */
60 static uint32_t __attribute__((pure))
61 depth_to_mask(uint8_t depth)
62 {
63         VERIFY_DEPTH(depth);
64
65         /* To calculate the mask, start with the most significant bit set and
66          * arithmetic-right-shift it so that 1's fill in from the left.
67          */
68         return (int)0x80000000 >> (depth - 1);
69 }
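/*
 * Worked example: for depth = 20 this evaluates (int)0x80000000 >> 19, which
 * is 0xFFFFF000, i.e. the top 20 bits set. The cast to int makes the shift
 * arithmetic, so 1's are replicated in from the left; this relies on the
 * compiler sign-extending right shifts of signed values, as GCC and Clang do.
 */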
70
71 /*
72  * Converts given depth value to its corresponding range value.
73  */
74 static uint32_t __attribute__((pure))
75 depth_to_range(uint8_t depth)
76 {
77         VERIFY_DEPTH(depth);
78
79         /*
80          * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
81          */
82         if (depth <= MAX_DEPTH_TBL24)
83                 return 1 << (MAX_DEPTH_TBL24 - depth);
84
85         /* Else if depth is greater than 24 */
86         return 1 << (RTE_LPM_MAX_DEPTH - depth);
87 }
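/*
 * Worked examples: a /20 route spans 1 << (24 - 20) = 16 consecutive tbl24
 * entries, while a /28 route spans 1 << (32 - 28) = 16 consecutive entries
 * within a single tbl8 group.
 */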
88
89 /*
90  * Find an existing lpm table and return a pointer to it.
91  */
92 struct rte_lpm_v20 *
93 rte_lpm_find_existing_v20(const char *name)
94 {
95         struct rte_lpm_v20 *l = NULL;
96         struct rte_tailq_entry *te;
97         struct rte_lpm_list *lpm_list;
98
99         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
100
101         rte_mcfg_tailq_read_lock();
102         TAILQ_FOREACH(te, lpm_list, next) {
103                 l = te->data;
104                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
105                         break;
106         }
107         rte_mcfg_tailq_read_unlock();
108
109         if (te == NULL) {
110                 rte_errno = ENOENT;
111                 return NULL;
112         }
113
114         return l;
115 }
116 VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
117
118 struct rte_lpm *
119 rte_lpm_find_existing_v1604(const char *name)
120 {
121         struct rte_lpm *l = NULL;
122         struct rte_tailq_entry *te;
123         struct rte_lpm_list *lpm_list;
124
125         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
126
127         rte_mcfg_tailq_read_lock();
128         TAILQ_FOREACH(te, lpm_list, next) {
129                 l = te->data;
130                 if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
131                         break;
132         }
133         rte_mcfg_tailq_read_unlock();
134
135         if (te == NULL) {
136                 rte_errno = ENOENT;
137                 return NULL;
138         }
139
140         return l;
141 }
142 BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
143 MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
144                 rte_lpm_find_existing_v1604);
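/*
 * Minimal lookup-by-name sketch (assumes only the public rte_lpm.h API; the
 * table name is an arbitrary example created elsewhere with rte_lpm_create()):
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *
 *	if (lpm == NULL && rte_errno == ENOENT)
 *		printf("no LPM table named example_lpm exists\n");
 */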
145
146 /*
147  * Allocates memory for LPM object
148  */
149 struct rte_lpm_v20 *
150 rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
151                 __rte_unused int flags)
152 {
153         char mem_name[RTE_LPM_NAMESIZE];
154         struct rte_lpm_v20 *lpm = NULL;
155         struct rte_tailq_entry *te;
156         uint32_t mem_size;
157         struct rte_lpm_list *lpm_list;
158
159         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
160
161         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
162
163         /* Check user arguments. */
164         if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
165                 rte_errno = EINVAL;
166                 return NULL;
167         }
168
169         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
170
171         /* Determine the amount of memory to allocate. */
172         mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
173
174         rte_mcfg_tailq_write_lock();
175
176         /* guarantee there's no existing entry with the same name */
177         TAILQ_FOREACH(te, lpm_list, next) {
178                 lpm = te->data;
179                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
180                         break;
181         }
182
183         if (te != NULL) {
184                 lpm = NULL;
185                 rte_errno = EEXIST;
186                 goto exit;
187         }
188
189         /* allocate tailq entry */
190         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
191         if (te == NULL) {
192                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
193                 rte_errno = ENOMEM;
194                 goto exit;
195         }
196
197         /* Allocate memory to store the LPM data structures. */
198         lpm = rte_zmalloc_socket(mem_name, mem_size,
199                         RTE_CACHE_LINE_SIZE, socket_id);
200         if (lpm == NULL) {
201                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
202                 rte_free(te);
203                 rte_errno = ENOMEM;
204                 goto exit;
205         }
206
207         /* Save user arguments. */
208         lpm->max_rules = max_rules;
209         strlcpy(lpm->name, name, sizeof(lpm->name));
210
211         te->data = lpm;
212
213         TAILQ_INSERT_TAIL(lpm_list, te, next);
214
215 exit:
216         rte_mcfg_tailq_write_unlock();
217
218         return lpm;
219 }
220 VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
221
222 struct rte_lpm *
223 rte_lpm_create_v1604(const char *name, int socket_id,
224                 const struct rte_lpm_config *config)
225 {
226         char mem_name[RTE_LPM_NAMESIZE];
227         struct rte_lpm *lpm = NULL;
228         struct rte_tailq_entry *te;
229         uint32_t mem_size, rules_size, tbl8s_size;
230         struct rte_lpm_list *lpm_list;
231
232         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
233
234         RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
235
236         /* Check user arguments. */
237         if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
238                         || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
239                 rte_errno = EINVAL;
240                 return NULL;
241         }
242
243         snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
244
245         /* Determine the amount of memory to allocate. */
246         mem_size = sizeof(*lpm);
247         rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
248         tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
249                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
250
251         rte_mcfg_tailq_write_lock();
252
253         /* guarantee there's no existing entry with the same name */
254         TAILQ_FOREACH(te, lpm_list, next) {
255                 lpm = te->data;
256                 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
257                         break;
258         }
259
260         if (te != NULL) {
261                 lpm = NULL;
262                 rte_errno = EEXIST;
263                 goto exit;
264         }
265
266         /* allocate tailq entry */
267         te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
268         if (te == NULL) {
269                 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
270                 rte_errno = ENOMEM;
271                 goto exit;
272         }
273
274         /* Allocate memory to store the LPM data structures. */
275         lpm = rte_zmalloc_socket(mem_name, mem_size,
276                         RTE_CACHE_LINE_SIZE, socket_id);
277         if (lpm == NULL) {
278                 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
279                 rte_free(te);
280                 rte_errno = ENOMEM;
281                 goto exit;
282         }
283
284         lpm->rules_tbl = rte_zmalloc_socket(NULL,
285                         (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
286
287         if (lpm->rules_tbl == NULL) {
288                 RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
289                 rte_free(lpm);
290                 lpm = NULL;
291                 rte_free(te);
292                 rte_errno = ENOMEM;
293                 goto exit;
294         }
295
296         lpm->tbl8 = rte_zmalloc_socket(NULL,
297                         (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
298
299         if (lpm->tbl8 == NULL) {
300                 RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
301                 rte_free(lpm->rules_tbl);
302                 rte_free(lpm);
303                 lpm = NULL;
304                 rte_free(te);
305                 rte_errno = ENOMEM;
306                 goto exit;
307         }
308
309         /* Save user arguments. */
310         lpm->max_rules = config->max_rules;
311         lpm->number_tbl8s = config->number_tbl8s;
312         strlcpy(lpm->name, name, sizeof(lpm->name));
313
314         te->data = lpm;
315
316         TAILQ_INSERT_TAIL(lpm_list, te, next);
317
318 exit:
319         rte_mcfg_tailq_write_unlock();
320
321         return lpm;
322 }
323 BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
324 MAP_STATIC_SYMBOL(
325         struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
326                         const struct rte_lpm_config *config), rte_lpm_create_v1604);
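/*
 * Minimal creation sketch (assumes only the public rte_lpm.h API; the name
 * and sizes below are arbitrary examples):
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *			&config);
 *
 *	if (lpm == NULL)
 *		printf("rte_lpm_create failed: %d\n", rte_errno);
 *
 * On success the table can hold up to 1024 rules and 256 tbl8 groups.
 */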
327
328 /*
329  * Deallocates memory for given LPM table.
330  */
331 void
332 rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
333 {
334         struct rte_lpm_list *lpm_list;
335         struct rte_tailq_entry *te;
336
337         /* Check user arguments. */
338         if (lpm == NULL)
339                 return;
340
341         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
342
343         rte_mcfg_tailq_write_lock();
344
345         /* find our tailq entry */
346         TAILQ_FOREACH(te, lpm_list, next) {
347                 if (te->data == (void *) lpm)
348                         break;
349         }
350         if (te != NULL)
351                 TAILQ_REMOVE(lpm_list, te, next);
352
353         rte_mcfg_tailq_write_unlock();
354
355         rte_free(lpm);
356         rte_free(te);
357 }
358 VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
359
360 void
361 rte_lpm_free_v1604(struct rte_lpm *lpm)
362 {
363         struct rte_lpm_list *lpm_list;
364         struct rte_tailq_entry *te;
365
366         /* Check user arguments. */
367         if (lpm == NULL)
368                 return;
369
370         lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
371
372         rte_mcfg_tailq_write_lock();
373
374         /* find our tailq entry */
375         TAILQ_FOREACH(te, lpm_list, next) {
376                 if (te->data == (void *) lpm)
377                         break;
378         }
379         if (te != NULL)
380                 TAILQ_REMOVE(lpm_list, te, next);
381
382         rte_mcfg_tailq_write_unlock();
383
384         rte_free(lpm->tbl8);
385         rte_free(lpm->rules_tbl);
386         rte_free(lpm);
387         rte_free(te);
388 }
389 BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
390 MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
391                 rte_lpm_free_v1604);
392
393 /*
394  * Adds a rule to the rule table.
395  *
396  * NOTE: The rule table is split into 32 groups. Each group contains rules that
397  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
398  * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
399  * to refer to depth 1 because even though the depth range is 1 - 32, depths
400  * are stored in the rule table from 0 - 31.
401  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
402  */
403 static int32_t
404 rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
405         uint8_t next_hop)
406 {
407         uint32_t rule_gindex, rule_index, last_rule;
408         int i;
409
410         VERIFY_DEPTH(depth);
411
412         /* Scan through rule group to see if rule already exists. */
413         if (lpm->rule_info[depth - 1].used_rules > 0) {
414
415                 /* rule_gindex stands for rule group index. */
416                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
417                 /* Initialise rule_index to point to start of rule group. */
418                 rule_index = rule_gindex;
419                 /* Last rule = Last used rule in this rule group. */
420                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
421
422                 for (; rule_index < last_rule; rule_index++) {
423
424                         /* If rule already exists update its next_hop and return. */
425                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
426                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
427
428                                 return rule_index;
429                         }
430                 }
431
432                 if (rule_index == lpm->max_rules)
433                         return -ENOSPC;
434         } else {
435                 /* Calculate the position in which the rule will be stored. */
436                 rule_index = 0;
437
438                 for (i = depth - 1; i > 0; i--) {
439                         if (lpm->rule_info[i - 1].used_rules > 0) {
440                                 rule_index = lpm->rule_info[i - 1].first_rule
441                                                 + lpm->rule_info[i - 1].used_rules;
442                                 break;
443                         }
444                 }
445                 if (rule_index == lpm->max_rules)
446                         return -ENOSPC;
447
448                 lpm->rule_info[depth - 1].first_rule = rule_index;
449         }
450
451         /* Make room for the new rule in the array. */
452         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
453                 if (lpm->rule_info[i - 1].first_rule
454                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
455                         return -ENOSPC;
456
457                 if (lpm->rule_info[i - 1].used_rules > 0) {
458                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
459                                 + lpm->rule_info[i - 1].used_rules]
460                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
461                         lpm->rule_info[i - 1].first_rule++;
462                 }
463         }
464
465         /* Add the new rule. */
466         lpm->rules_tbl[rule_index].ip = ip_masked;
467         lpm->rules_tbl[rule_index].next_hop = next_hop;
468
469         /* Increment the used rules counter for this rule group. */
470         lpm->rule_info[depth - 1].used_rules++;
471
472         return rule_index;
473 }
474
475 static int32_t
476 rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
477         uint32_t next_hop)
478 {
479         uint32_t rule_gindex, rule_index, last_rule;
480         int i;
481
482         VERIFY_DEPTH(depth);
483
484         /* Scan through rule group to see if rule already exists. */
485         if (lpm->rule_info[depth - 1].used_rules > 0) {
486
487                 /* rule_gindex stands for rule group index. */
488                 rule_gindex = lpm->rule_info[depth - 1].first_rule;
489                 /* Initialise rule_index to point to start of rule group. */
490                 rule_index = rule_gindex;
491                 /* Last rule = Last used rule in this rule group. */
492                 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
493
494                 for (; rule_index < last_rule; rule_index++) {
495
496                         /* If rule already exists update its next_hop and return. */
497                         if (lpm->rules_tbl[rule_index].ip == ip_masked) {
498                                 lpm->rules_tbl[rule_index].next_hop = next_hop;
499
500                                 return rule_index;
501                         }
502                 }
503
504                 if (rule_index == lpm->max_rules)
505                         return -ENOSPC;
506         } else {
507                 /* Calculate the position in which the rule will be stored. */
508                 rule_index = 0;
509
510                 for (i = depth - 1; i > 0; i--) {
511                         if (lpm->rule_info[i - 1].used_rules > 0) {
512                                 rule_index = lpm->rule_info[i - 1].first_rule
513                                                 + lpm->rule_info[i - 1].used_rules;
514                                 break;
515                         }
516                 }
517                 if (rule_index == lpm->max_rules)
518                         return -ENOSPC;
519
520                 lpm->rule_info[depth - 1].first_rule = rule_index;
521         }
522
523         /* Make room for the new rule in the array. */
524         for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
525                 if (lpm->rule_info[i - 1].first_rule
526                                 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
527                         return -ENOSPC;
528
529                 if (lpm->rule_info[i - 1].used_rules > 0) {
530                         lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
531                                 + lpm->rule_info[i - 1].used_rules]
532                                         = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
533                         lpm->rule_info[i - 1].first_rule++;
534                 }
535         }
536
537         /* Add the new rule. */
538         lpm->rules_tbl[rule_index].ip = ip_masked;
539         lpm->rules_tbl[rule_index].next_hop = next_hop;
540
541         /* Increment the used rules counter for this rule group. */
542         lpm->rule_info[depth - 1].used_rules++;
543
544         return rule_index;
545 }
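/*
 * Rule table layout sketch: rules_tbl[] holds all rules contiguously, grouped
 * and ordered by depth, with rule_info[depth - 1] recording each group's
 * first_rule index and used_rules count. Making room for a new rule shifts
 * every deeper group up by one slot by copying that group's first rule to the
 * slot just past its end and advancing first_rule, e.g.:
 *
 *	before adding a /16:  [ /8 /8 | /16 /16 /16 | /24 ]
 *	after adding a /16:   [ /8 /8 | /16 /16 /16 /16 | /24 ]
 *
 * where the /24 group has moved one slot to the right to open the gap.
 */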
546
547 /*
548  * Delete a rule from the rule table.
549  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
550  */
551 static void
552 rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
553 {
554         int i;
555
556         VERIFY_DEPTH(depth);
557
558         lpm->rules_tbl[rule_index] =
559                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
560                                 + lpm->rule_info[depth - 1].used_rules - 1];
561
562         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
563                 if (lpm->rule_info[i].used_rules > 0) {
564                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
565                                 lpm->rules_tbl[lpm->rule_info[i].first_rule
566                                         + lpm->rule_info[i].used_rules - 1];
567                         lpm->rule_info[i].first_rule--;
568                 }
569         }
570
571         lpm->rule_info[depth - 1].used_rules--;
572 }
573
574 static void
575 rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
576 {
577         int i;
578
579         VERIFY_DEPTH(depth);
580
581         lpm->rules_tbl[rule_index] =
582                         lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
583                         + lpm->rule_info[depth - 1].used_rules - 1];
584
585         for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
586                 if (lpm->rule_info[i].used_rules > 0) {
587                         lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
588                                         lpm->rules_tbl[lpm->rule_info[i].first_rule
589                                                 + lpm->rule_info[i].used_rules - 1];
590                         lpm->rule_info[i].first_rule--;
591                 }
592         }
593
594         lpm->rule_info[depth - 1].used_rules--;
595 }
596
597 /*
598  * Finds a rule in rule table.
599  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
600  */
601 static int32_t
602 rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
603 {
604         uint32_t rule_gindex, last_rule, rule_index;
605
606         VERIFY_DEPTH(depth);
607
608         rule_gindex = lpm->rule_info[depth - 1].first_rule;
609         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
610
611         /* Scan used rules at given depth to find rule. */
612         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
613                 /* If rule is found return the rule index. */
614                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
615                         return rule_index;
616         }
617
618         /* If rule is not found return -EINVAL. */
619         return -EINVAL;
620 }
621
622 static int32_t
623 rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
624 {
625         uint32_t rule_gindex, last_rule, rule_index;
626
627         VERIFY_DEPTH(depth);
628
629         rule_gindex = lpm->rule_info[depth - 1].first_rule;
630         last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
631
632         /* Scan used rules at given depth to find rule. */
633         for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
634                 /* If rule is found return the rule index. */
635                 if (lpm->rules_tbl[rule_index].ip == ip_masked)
636                         return rule_index;
637         }
638
639         /* If rule is not found return -EINVAL. */
640         return -EINVAL;
641 }
642
643 /*
644  * Find, clean and allocate a tbl8.
645  */
646 static int32_t
647 tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
648 {
649         uint32_t group_idx; /* tbl8 group index. */
650         struct rte_lpm_tbl_entry_v20 *tbl8_entry;
651
652         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
653         for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
654                         group_idx++) {
655                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
656                 /* If a free tbl8 group is found clean it and set as VALID. */
657                 if (!tbl8_entry->valid_group) {
658                         memset(&tbl8_entry[0], 0,
659                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
660                                         sizeof(tbl8_entry[0]));
661
662                         tbl8_entry->valid_group = VALID;
663
664                         /* Return group index for allocated tbl8 group. */
665                         return group_idx;
666                 }
667         }
668
669         /* If there are no tbl8 groups free then return error. */
670         return -ENOSPC;
671 }
672
673 static int32_t
674 tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
675 {
676         uint32_t group_idx; /* tbl8 group index. */
677         struct rte_lpm_tbl_entry *tbl8_entry;
678
679         /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
680         for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
681                 tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
682                 /* If a free tbl8 group is found clean it and set as VALID. */
683                 if (!tbl8_entry->valid_group) {
684                         memset(&tbl8_entry[0], 0,
685                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
686                                         sizeof(tbl8_entry[0]));
687
688                         tbl8_entry->valid_group = VALID;
689
690                         /* Return group index for allocated tbl8 group. */
691                         return group_idx;
692                 }
693         }
694
695         /* If there are no tbl8 groups free then return error. */
696         return -ENOSPC;
697 }
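/*
 * Index arithmetic note: tbl8 group g occupies the
 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES (256) entries starting at g * 256. The
 * allocator tests, and tbl8_free() clears, the valid_group flag only on the
 * group's first entry, so allocation is a linear scan over the groups and is
 * O(number_tbl8s) in the worst case.
 */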
698
699 static void
700 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
701 {
702         /* Set tbl8 group invalid */
703         tbl8[tbl8_group_start].valid_group = INVALID;
704 }
705
706 static void
707 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
708 {
709         /* Set tbl8 group invalid */
710         tbl8[tbl8_group_start].valid_group = INVALID;
711 }
712
713 static __rte_noinline int32_t
714 add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
715                 uint8_t next_hop)
716 {
717         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
718
719         /* Calculate the index into Table24. */
720         tbl24_index = ip >> 8;
721         tbl24_range = depth_to_range(depth);
722
723         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
724                 /*
725                  * Set the entry if the tbl24 entry is invalid, or if it is valid,
726                  * non-extended and its depth does not exceed the new depth.
727                  */
728                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
729                                 lpm->tbl24[i].depth <= depth)) {
730
731                         struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
732                                 .valid = VALID,
733                                 .valid_group = 0,
734                                 .depth = depth,
735                         };
736                         new_tbl24_entry.next_hop = next_hop;
737
738                         /* Setting tbl24 entry in one go to avoid race
739                          * conditions
740                          */
741                         __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
742                                         __ATOMIC_RELEASE);
743
744                         continue;
745                 }
746
747                 if (lpm->tbl24[i].valid_group == 1) {
748                         /* If tbl24 entry is valid and extended calculate the
750                          * index into tbl8.
750                          */
751                         tbl8_index = lpm->tbl24[i].group_idx *
752                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
753                         tbl8_group_end = tbl8_index +
754                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
755
756                         for (j = tbl8_index; j < tbl8_group_end; j++) {
757                                 if (!lpm->tbl8[j].valid ||
758                                                 lpm->tbl8[j].depth <= depth) {
759                                         struct rte_lpm_tbl_entry_v20
760                                                 new_tbl8_entry = {
761                                                 .valid = VALID,
762                                                 .valid_group = VALID,
763                                                 .depth = depth,
764                                         };
765                                         new_tbl8_entry.next_hop = next_hop;
766
767                                         /*
768                                          * Setting tbl8 entry in one go to avoid
769                                          * race conditions
770                                          */
771                                         lpm->tbl8[j] = new_tbl8_entry;
772
773                                         continue;
774                                 }
775                         }
776                 }
777         }
778
779         return 0;
780 }
781
782 static __rte_noinline int32_t
783 add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
784                 uint32_t next_hop)
785 {
786 #define group_idx next_hop
787         uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
788
789         /* Calculate the index into Table24. */
790         tbl24_index = ip >> 8;
791         tbl24_range = depth_to_range(depth);
792
793         for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
794                 /*
795                  * Set the entry if the tbl24 entry is invalid, or if it is valid,
796                  * non-extended and its depth does not exceed the new depth.
797                  */
798                 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
799                                 lpm->tbl24[i].depth <= depth)) {
800
801                         struct rte_lpm_tbl_entry new_tbl24_entry = {
802                                 .next_hop = next_hop,
803                                 .valid = VALID,
804                                 .valid_group = 0,
805                                 .depth = depth,
806                         };
807
808                         /* Setting tbl24 entry in one go to avoid race
809                          * conditions
810                          */
811                         __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
812                                         __ATOMIC_RELEASE);
813
814                         continue;
815                 }
816
817                 if (lpm->tbl24[i].valid_group == 1) {
818                         /* If tbl24 entry is valid and extended calculate the
820                          * index into tbl8.
820                          */
821                         tbl8_index = lpm->tbl24[i].group_idx *
822                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
823                         tbl8_group_end = tbl8_index +
824                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
825
826                         for (j = tbl8_index; j < tbl8_group_end; j++) {
827                                 if (!lpm->tbl8[j].valid ||
828                                                 lpm->tbl8[j].depth <= depth) {
829                                         struct rte_lpm_tbl_entry
830                                                 new_tbl8_entry = {
831                                                 .valid = VALID,
832                                                 .valid_group = VALID,
833                                                 .depth = depth,
834                                                 .next_hop = next_hop,
835                                         };
836
837                                         /*
838                                          * Setting tbl8 entry in one go to avoid
839                                          * race conditions
840                                          */
841                                         lpm->tbl8[j] = new_tbl8_entry;
842
843                                         continue;
844                                 }
845                         }
846                 }
847         }
848 #undef group_idx
849         return 0;
850 }
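/*
 * Example of the depth <= 24 path: adding 10.1.0.0/16 writes
 * 1 << (24 - 16) = 256 consecutive tbl24 entries starting at index
 * 0x0A0100 (10.1.0.0 >> 8), skipping entries already owned by a longer
 * prefix and descending into existing tbl8 groups to update any entries
 * there with a depth of 16 or less. tbl24 entries are published with a
 * release store and tbl8 entries are assigned as whole structs, so lookups
 * never observe a half-written entry.
 */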
851
852 static __rte_noinline int32_t
853 add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
854                 uint8_t next_hop)
855 {
856         uint32_t tbl24_index;
857         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
858                 tbl8_range, i;
859
860         tbl24_index = (ip_masked >> 8);
861         tbl8_range = depth_to_range(depth);
862
863         if (!lpm->tbl24[tbl24_index].valid) {
864                 /* Search for a free tbl8 group. */
865                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
866
867                 /* Check tbl8 allocation was successful. */
868                 if (tbl8_group_index < 0) {
869                         return tbl8_group_index;
870                 }
871
872                 /* Find index into tbl8 and range. */
873                 tbl8_index = (tbl8_group_index *
874                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
875                                 (ip_masked & 0xFF);
876
877                 /* Set tbl8 entry. */
878                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
879                         lpm->tbl8[i].depth = depth;
880                         lpm->tbl8[i].next_hop = next_hop;
881                         lpm->tbl8[i].valid = VALID;
882                 }
883
884                 /*
885                  * Update tbl24 entry to point to new tbl8 entry. Note: The
886                  * ext_flag and tbl8_index need to be updated simultaneously,
887                  * so assign whole structure in one go
888                  */
889
890                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
891                         .group_idx = (uint8_t)tbl8_group_index,
892                         .valid = VALID,
893                         .valid_group = 1,
894                         .depth = 0,
895                 };
896
897                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
898                                 __ATOMIC_RELEASE);
899
900         } /* If the entry is valid but not extended, calculate the index into tbl8. */
901         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
902                 /* Search for free tbl8 group. */
903                 tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
904
905                 if (tbl8_group_index < 0) {
906                         return tbl8_group_index;
907                 }
908
909                 tbl8_group_start = tbl8_group_index *
910                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
911                 tbl8_group_end = tbl8_group_start +
912                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
913
914                 /* Populate new tbl8 with tbl24 value. */
915                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
916                         lpm->tbl8[i].valid = VALID;
917                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
918                         lpm->tbl8[i].next_hop =
919                                         lpm->tbl24[tbl24_index].next_hop;
920                 }
921
922                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
923
924                 /* Insert new rule into the tbl8 entry. */
925                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
926                         lpm->tbl8[i].valid = VALID;
927                         lpm->tbl8[i].depth = depth;
928                         lpm->tbl8[i].next_hop = next_hop;
929                 }
930
931                 /*
932                  * Update tbl24 entry to point to new tbl8 entry. Note: The
933                  * ext_flag and tbl8_index need to be updated simultaneously,
934                  * so assign whole structure in one go.
935                  */
936
937                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
938                                 .group_idx = (uint8_t)tbl8_group_index,
939                                 .valid = VALID,
940                                 .valid_group = 1,
941                                 .depth = 0,
942                 };
943
944                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
945                                 __ATOMIC_RELEASE);
946
947         } else { /*
948                 * If it is a valid, extended entry, calculate the index into tbl8.
949                 */
950                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
951                 tbl8_group_start = tbl8_group_index *
952                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
953                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
954
955                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
956
957                         if (!lpm->tbl8[i].valid ||
958                                         lpm->tbl8[i].depth <= depth) {
959                                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
960                                         .valid = VALID,
961                                         .depth = depth,
962                                         .valid_group = lpm->tbl8[i].valid_group,
963                                 };
964                                 new_tbl8_entry.next_hop = next_hop;
965                                 /*
966                                  * Setting tbl8 entry in one go to avoid race
967                                  * condition
968                                  */
969                                 lpm->tbl8[i] = new_tbl8_entry;
970
971                                 continue;
972                         }
973                 }
974         }
975
976         return 0;
977 }
978
979 static __rte_noinline int32_t
980 add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
981                 uint32_t next_hop)
982 {
983 #define group_idx next_hop
984         uint32_t tbl24_index;
985         int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
986                 tbl8_range, i;
987
988         tbl24_index = (ip_masked >> 8);
989         tbl8_range = depth_to_range(depth);
990
991         if (!lpm->tbl24[tbl24_index].valid) {
992                 /* Search for a free tbl8 group. */
993                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
994
995                 /* Check tbl8 allocation was successful. */
996                 if (tbl8_group_index < 0) {
997                         return tbl8_group_index;
998                 }
999
1000                 /* Find index into tbl8 and range. */
1001                 tbl8_index = (tbl8_group_index *
1002                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
1003                                 (ip_masked & 0xFF);
1004
1005                 /* Set tbl8 entry. */
1006                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1007                         lpm->tbl8[i].depth = depth;
1008                         lpm->tbl8[i].next_hop = next_hop;
1009                         lpm->tbl8[i].valid = VALID;
1010                 }
1011
1012                 /*
1013                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1014                  * ext_flag and tbl8_index need to be updated simultaneously,
1015                  * so assign whole structure in one go
1016                  */
1017
1018                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1019                         .group_idx = tbl8_group_index,
1020                         .valid = VALID,
1021                         .valid_group = 1,
1022                         .depth = 0,
1023                 };
1024
1025                 /* The tbl24 entry must be written only after the
1026                  * tbl8 entries are written.
1027                  */
1028                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1029                                 __ATOMIC_RELEASE);
1030
1031         } /* If the entry is valid but not extended, calculate the index into tbl8. */
1032         else if (lpm->tbl24[tbl24_index].valid_group == 0) {
1033                 /* Search for free tbl8 group. */
1034                 tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
1035
1036                 if (tbl8_group_index < 0) {
1037                         return tbl8_group_index;
1038                 }
1039
1040                 tbl8_group_start = tbl8_group_index *
1041                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1042                 tbl8_group_end = tbl8_group_start +
1043                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1044
1045                 /* Populate new tbl8 with tbl24 value. */
1046                 for (i = tbl8_group_start; i < tbl8_group_end; i++) {
1047                         lpm->tbl8[i].valid = VALID;
1048                         lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
1049                         lpm->tbl8[i].next_hop =
1050                                         lpm->tbl24[tbl24_index].next_hop;
1051                 }
1052
1053                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1054
1055                 /* Insert new rule into the tbl8 entry. */
1056                 for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
1057                         lpm->tbl8[i].valid = VALID;
1058                         lpm->tbl8[i].depth = depth;
1059                         lpm->tbl8[i].next_hop = next_hop;
1060                 }
1061
1062                 /*
1063                  * Update tbl24 entry to point to new tbl8 entry. Note: The
1064                  * ext_flag and tbl8_index need to be updated simultaneously,
1065                  * so assign whole structure in one go.
1066                  */
1067
1068                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1069                                 .group_idx = tbl8_group_index,
1070                                 .valid = VALID,
1071                                 .valid_group = 1,
1072                                 .depth = 0,
1073                 };
1074
1075                 /* The tbl24 entry must be written only after the
1076                  * tbl8 entries are written.
1077                  */
1078                 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1079                                 __ATOMIC_RELEASE);
1080
1081         } else { /*
1082                 * If it is a valid, extended entry, calculate the index into tbl8.
1083                 */
1084                 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1085                 tbl8_group_start = tbl8_group_index *
1086                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1087                 tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1088
1089                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1090
1091                         if (!lpm->tbl8[i].valid ||
1092                                         lpm->tbl8[i].depth <= depth) {
1093                                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1094                                         .valid = VALID,
1095                                         .depth = depth,
1096                                         .next_hop = next_hop,
1097                                         .valid_group = lpm->tbl8[i].valid_group,
1098                                 };
1099
1100                                 /*
1101                                  * Setting tbl8 entry in one go to avoid race
1102                                  * condition
1103                                  */
1104                                 lpm->tbl8[i] = new_tbl8_entry;
1105
1106                                 continue;
1107                         }
1108                 }
1109         }
1110 #undef group_idx
1111         return 0;
1112 }
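/*
 * Ordering note for the depth > 24 path: the tbl8 group is fully populated
 * before the tbl24 entry that references it is stored with __ATOMIC_RELEASE.
 * The release store makes the tbl8 writes visible no later than the tbl24
 * entry that publishes the group, so a lookup that observes the extended
 * tbl24 entry finds an initialized tbl8 group rather than stale contents.
 */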
1113
1114 /*
1115  * Add a route
1116  */
1117 int
1118 rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1119                 uint8_t next_hop)
1120 {
1121         int32_t rule_index, status = 0;
1122         uint32_t ip_masked;
1123
1124         /* Check user arguments. */
1125         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1126                 return -EINVAL;
1127
1128         ip_masked = ip & depth_to_mask(depth);
1129
1130         /* Add the rule to the rule table. */
1131         rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
1132
1133         /* If there is no space available for the new rule, return an error. */
1134         if (rule_index < 0) {
1135                 return rule_index;
1136         }
1137
1138         if (depth <= MAX_DEPTH_TBL24) {
1139                 status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
1140         } else { /* If depth > MAX_DEPTH_TBL24 */
1141                 status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
1142
1143                 /*
1144                  * If add fails due to exhaustion of tbl8 extensions delete
1145                  * rule that was added to rule table.
1146                  */
1147                 if (status < 0) {
1148                         rule_delete_v20(lpm, rule_index, depth);
1149
1150                         return status;
1151                 }
1152         }
1153
1154         return 0;
1155 }
1156 VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
1157
1158 int
1159 rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1160                 uint32_t next_hop)
1161 {
1162         int32_t rule_index, status = 0;
1163         uint32_t ip_masked;
1164
1165         /* Check user arguments. */
1166         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1167                 return -EINVAL;
1168
1169         ip_masked = ip & depth_to_mask(depth);
1170
1171         /* Add the rule to the rule table. */
1172         rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
1173
1174         /* If there is no space available for the new rule, return an error. */
1175         if (rule_index < 0) {
1176                 return rule_index;
1177         }
1178
1179         if (depth <= MAX_DEPTH_TBL24) {
1180                 status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
1181         } else { /* If depth > MAX_DEPTH_TBL24 */
1182                 status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
1183
1184                 /*
1185                  * If add fails due to exhaustion of tbl8 extensions delete
1186                  * rule that was added to rule table.
1187                  */
1188                 if (status < 0) {
1189                         rule_delete_v1604(lpm, rule_index, depth);
1190
1191                         return status;
1192                 }
1193         }
1194
1195         return 0;
1196 }
1197 BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
1198 MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
1199                 uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
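/*
 * Minimal add-then-lookup sketch (assumes only the public rte_lpm.h API; the
 * prefix and next-hop values are arbitrary examples, and lpm was created as
 * shown above):
 *
 *	uint32_t net = ((uint32_t)192 << 24) | (168u << 16) | (10u << 8);
 *	uint32_t hop = 0;
 *
 *	if (rte_lpm_add(lpm, net, 24, 7) == 0 &&
 *			rte_lpm_lookup(lpm, net | 0x2a, &hop) == 0)
 *		printf("next hop %u\n", hop);
 *
 * The lookup of 192.168.10.42 prints 7, because the /24 covers that host.
 */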
1200
1201 /*
1202  * Look for a rule in the high-level rules table
1203  */
1204 int
1205 rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1206 uint8_t *next_hop)
1207 {
1208         uint32_t ip_masked;
1209         int32_t rule_index;
1210
1211         /* Check user arguments. */
1212         if ((lpm == NULL) ||
1213                 (next_hop == NULL) ||
1214                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1215                 return -EINVAL;
1216
1217         /* Look for the rule using rule_find. */
1218         ip_masked = ip & depth_to_mask(depth);
1219         rule_index = rule_find_v20(lpm, ip_masked, depth);
1220
1221         if (rule_index >= 0) {
1222                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1223                 return 1;
1224         }
1225
1226         /* If rule is not found return 0. */
1227         return 0;
1228 }
1229 VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
1230
1231 int
1232 rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1233 uint32_t *next_hop)
1234 {
1235         uint32_t ip_masked;
1236         int32_t rule_index;
1237
1238         /* Check user arguments. */
1239         if ((lpm == NULL) ||
1240                 (next_hop == NULL) ||
1241                 (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
1242                 return -EINVAL;
1243
1244         /* Look for the rule using rule_find. */
1245         ip_masked = ip & depth_to_mask(depth);
1246         rule_index = rule_find_v1604(lpm, ip_masked, depth);
1247
1248         if (rule_index >= 0) {
1249                 *next_hop = lpm->rules_tbl[rule_index].next_hop;
1250                 return 1;
1251         }
1252
1253         /* If rule is not found return 0. */
1254         return 0;
1255 }
1256 BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
1257 MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
1258                 uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
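/*
 * Note: rte_lpm_is_rule_present() consults only the rules table (the
 * control-plane copy of every added prefix), not tbl24/tbl8, so it reports
 * exact-prefix matches. Use rte_lpm_lookup() for longest-prefix matching on
 * the data path.
 */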
1259
1260 static int32_t
1261 find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
1262                 uint8_t *sub_rule_depth)
1263 {
1264         int32_t rule_index;
1265         uint32_t ip_masked;
1266         uint8_t prev_depth;
1267
1268         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1269                 ip_masked = ip & depth_to_mask(prev_depth);
1270
1271                 rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
1272
1273                 if (rule_index >= 0) {
1274                         *sub_rule_depth = prev_depth;
1275                         return rule_index;
1276                 }
1277         }
1278
1279         return -1;
1280 }
1281
1282 static int32_t
1283 find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1284                 uint8_t *sub_rule_depth)
1285 {
1286         int32_t rule_index;
1287         uint32_t ip_masked;
1288         uint8_t prev_depth;
1289
1290         for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
1291                 ip_masked = ip & depth_to_mask(prev_depth);
1292
1293                 rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
1294
1295                 if (rule_index >= 0) {
1296                         *sub_rule_depth = prev_depth;
1297                         return rule_index;
1298                 }
1299         }
1300
1301         return -1;
1302 }
1303
1304 static int32_t
1305 delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1306         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1307 {
1308         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1309
1310         /* Calculate the range and index into Table24. */
1311         tbl24_range = depth_to_range(depth);
1312         tbl24_index = (ip_masked >> 8);
1313
1314         /*
1315          * First check the sub_rule_index. A value of -1 indicates that no
1316          * replacement rule exists; a non-negative value is a sub_rule_index.
1317          */
1318         if (sub_rule_index < 0) {
1319                 /*
1320                  * If no replacement rule exists then invalidate entries
1321                  * associated with this rule.
1322                  */
1323                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1324
1325                         if (lpm->tbl24[i].valid_group == 0 &&
1326                                         lpm->tbl24[i].depth <= depth) {
1327                                 struct rte_lpm_tbl_entry_v20
1328                                         zero_tbl24_entry = {
1329                                                 .valid = INVALID,
1330                                                 .depth = 0,
1331                                                 .valid_group = 0,
1332                                         };
1333                                         zero_tbl24_entry.next_hop = 0;
1334                                 __atomic_store(&lpm->tbl24[i],
1335                                         &zero_tbl24_entry, __ATOMIC_RELEASE);
1336                         } else if (lpm->tbl24[i].valid_group == 1) {
1337                                 /*
1338                                  * If TBL24 entry is extended, then there has
1339                                  * to be a rule with depth >= 25 in the
1340                                  * associated TBL8 group.
1341                                  */
1342
1343                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1344                                 tbl8_index = tbl8_group_index *
1345                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1346
1347                                 for (j = tbl8_index; j < (tbl8_index +
1348                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1349
1350                                         if (lpm->tbl8[j].depth <= depth)
1351                                                 lpm->tbl8[j].valid = INVALID;
1352                                 }
1353                         }
1354                 }
1355         } else {
1356                 /*
1357                  * If a replacement rule exists then modify entries
1358                  * associated with this rule.
1359                  */
1360
1361                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1362                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1363                         .valid = VALID,
1364                         .valid_group = 0,
1365                         .depth = sub_rule_depth,
1366                 };
1367
1368                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1369                         .valid = VALID,
1370                         .valid_group = VALID,
1371                         .depth = sub_rule_depth,
1372                 };
1373                 new_tbl8_entry.next_hop =
1374                                 lpm->rules_tbl[sub_rule_index].next_hop;
1375
1376                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1377
1378                         if (lpm->tbl24[i].valid_group == 0 &&
1379                                         lpm->tbl24[i].depth <= depth) {
1380                                 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
1381                                                 __ATOMIC_RELEASE);
1382                         } else  if (lpm->tbl24[i].valid_group == 1) {
1383                                 /*
1384                                  * If TBL24 entry is extended, then there has
1385                                  * to be a rule with depth >= 25 in the
1386                                  * associated TBL8 group.
1387                                  */
1388
1389                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1390                                 tbl8_index = tbl8_group_index *
1391                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1392
1393                                 for (j = tbl8_index; j < (tbl8_index +
1394                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1395
1396                                         if (lpm->tbl8[j].depth <= depth)
1397                                                 lpm->tbl8[j] = new_tbl8_entry;
1398                                 }
1399                         }
1400                 }
1401         }
1402
1403         return 0;
1404 }
1405
1406 static int32_t
1407 delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1408         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1409 {
1410 #define group_idx next_hop
1411         uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
1412
1413         /* Calculate the range and index into Table24. */
1414         tbl24_range = depth_to_range(depth);
1415         tbl24_index = (ip_masked >> 8);
1416         struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
1417
1418         /*
1419          * First check the sub_rule_index. A value of -1 indicates that no
1420          * replacement rule exists; a non-negative value is a sub_rule_index.
1421          */
1422         if (sub_rule_index < 0) {
1423                 /*
1424                  * If no replacement rule exists then invalidate entries
1425                  * associated with this rule.
1426                  */
1427                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1428
1429                         if (lpm->tbl24[i].valid_group == 0 &&
1430                                         lpm->tbl24[i].depth <= depth) {
1431                                 __atomic_store(&lpm->tbl24[i],
1432                                         &zero_tbl24_entry, __ATOMIC_RELEASE);
1433                         } else if (lpm->tbl24[i].valid_group == 1) {
1434                                 /*
1435                                  * If TBL24 entry is extended, then there has
1436                                  * to be a rule with depth >= 25 in the
1437                                  * associated TBL8 group.
1438                                  */
1439
1440                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1441                                 tbl8_index = tbl8_group_index *
1442                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1443
1444                                 for (j = tbl8_index; j < (tbl8_index +
1445                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1446
1447                                         if (lpm->tbl8[j].depth <= depth)
1448                                                 lpm->tbl8[j].valid = INVALID;
1449                                 }
1450                         }
1451                 }
1452         } else {
1453                 /*
1454                  * If a replacement rule exists then modify entries
1455                  * associated with this rule.
1456                  */
1457
1458                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1459                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1460                         .valid = VALID,
1461                         .valid_group = 0,
1462                         .depth = sub_rule_depth,
1463                 };
1464
1465                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1466                         .valid = VALID,
1467                         .valid_group = VALID,
1468                         .depth = sub_rule_depth,
1469                         .next_hop =
1470                                 lpm->rules_tbl[sub_rule_index].next_hop,
1471                 };
1472
1473                 for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
1474
1475                         if (lpm->tbl24[i].valid_group == 0 &&
1476                                         lpm->tbl24[i].depth <= depth) {
1477                                 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
1478                                                 __ATOMIC_RELEASE);
1479                         } else if (lpm->tbl24[i].valid_group == 1) {
1480                                 /*
1481                                  * If TBL24 entry is extended, then there has
1482                                  * to be a rule with depth >= 25 in the
1483                                  * associated TBL8 group.
1484                                  */
1485
1486                                 tbl8_group_index = lpm->tbl24[i].group_idx;
1487                                 tbl8_index = tbl8_group_index *
1488                                                 RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1489
1490                                 for (j = tbl8_index; j < (tbl8_index +
1491                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
1492
1493                                         if (lpm->tbl8[j].depth <= depth)
1494                                                 lpm->tbl8[j] = new_tbl8_entry;
1495                                 }
1496                         }
1497                 }
1498         }
1499 #undef group_idx
1500         return 0;
1501 }
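/*
 * The __atomic_store(..., __ATOMIC_RELEASE) calls above publish each 32-bit
 * tbl24 entry as a single unit, so a lookup running on another lcore observes
 * either the old entry or the complete replacement, never a partially written
 * one. A minimal reader-side sketch (hypothetical; not the actual lookup
 * code, which lives in rte_lpm.h):
 *
 *	struct rte_lpm_tbl_entry e;
 *
 *	__atomic_load(&lpm->tbl24[tbl24_index], &e, __ATOMIC_ACQUIRE);
 *	if (e.valid && e.valid_group == 0) {
 *		// e.next_hop is the next hop, e.depth the covering prefix length
 *	} else if (e.valid_group) {
 *		// e.next_hop doubles as the tbl8 group index (see group_idx)
 *	}
 */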
1502
1503 /*
1504  * Checks if a tbl8 group can be recycled.
1505  *
1506  * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
1507  * Return of -EINVAL means tbl8 is empty and thus can be recycled.
1508  * Return of value > -1 means tbl8 is in use but has all the same values and
1509  * thus can be recycled.
1510  */
1511 static int32_t
1512 tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
1513                 uint32_t tbl8_group_start)
1514 {
1515         uint32_t tbl8_group_end, i;
1516         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1517
1518         /*
1519          * Check the first entry of the given tbl8. If it is invalid we know
1520          * this tbl8 does not contain any rule with a depth of MAX_DEPTH_TBL24
1521          * or less (as such a rule would affect all entries in a tbl8) and thus
1522          * this table can not be recycled.
1523          */
1524         if (tbl8[tbl8_group_start].valid) {
1525                 /*
1526                  * If first entry is valid check if the depth is 24 or less
1527                  * and if so check the rest of the entries to verify that they
1528                  * are all of this depth.
1529                  */
1530                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1531                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1532                                         i++) {
1533
1534                                 if (tbl8[i].depth !=
1535                                                 tbl8[tbl8_group_start].depth) {
1536
1537                                         return -EEXIST;
1538                                 }
1539                         }
1540                         /* If all entries are the same return the tbl8 index */
1541                         return tbl8_group_start;
1542                 }
1543
1544                 return -EEXIST;
1545         }
1546         /*
1547          * If the first entry is invalid check if the rest of the entries in
1548          * the tbl8 are invalid.
1549          */
1550         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1551                 if (tbl8[i].valid)
1552                         return -EEXIST;
1553         }
1554         /* If no valid entries are found then return -EINVAL. */
1555         return -EINVAL;
1556 }
1557
1558 static int32_t
1559 tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
1560                 uint32_t tbl8_group_start)
1561 {
1562         uint32_t tbl8_group_end, i;
1563         tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1564
1565         /*
1566          * Check the first entry of the given tbl8. If it is invalid we know
1567          * this tbl8 does not contain any rule with a depth of MAX_DEPTH_TBL24
1568          * or less (as such a rule would affect all entries in a tbl8) and thus
1569          * this table can not be recycled.
1570          */
1571         if (tbl8[tbl8_group_start].valid) {
1572                 /*
1573                  * If first entry is valid check if the depth is 24 or less
1574                  * and if so check the rest of the entries to verify that they
1575                  * are all of this depth.
1576                  */
1577                 if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1578                         for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1579                                         i++) {
1580
1581                                 if (tbl8[i].depth !=
1582                                                 tbl8[tbl8_group_start].depth) {
1583
1584                                         return -EEXIST;
1585                                 }
1586                         }
1587                         /* If all entries are the same return the tbl8 index */
1588                         return tbl8_group_start;
1589                 }
1590
1591                 return -EEXIST;
1592         }
1593         /*
1594          * If the first entry is invalid check if the rest of the entries in
1595          * the tbl8 are invalid.
1596          */
1597         for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1598                 if (tbl8[i].valid)
1599                         return -EEXIST;
1600         }
1601         /* If no valid entries are found then return -EINVAL. */
1602         return -EINVAL;
1603 }
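/*
 * A minimal sketch of how a caller interprets the recycle check (the real
 * callers are delete_depth_big_v20()/delete_depth_big_v1604() below):
 *
 *	int32_t idx = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
 *
 *	if (idx == -EINVAL) {
 *		// group is empty: invalidate the tbl24 entry, then free the group
 *	} else if (idx > -1) {
 *		// every entry matches tbl8[idx]: fold it back into tbl24, then free
 *	} else {
 *		// -EEXIST: the group still holds distinct routes, keep it
 *	}
 */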
1604
1605 static int32_t
1606 delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
1607         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1608 {
1609         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1610                         tbl8_range, i;
1611         int32_t tbl8_recycle_index;
1612
1613         /*
1614          * Calculate the index into tbl24 and range. Note: All depths larger
1615          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1616          */
1617         tbl24_index = ip_masked >> 8;
1618
1619         /* Calculate the index into tbl8 and range. */
1620         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1621         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1622         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1623         tbl8_range = depth_to_range(depth);
1624
1625         if (sub_rule_index < 0) {
1626                 /*
1627                  * Loop through the range of entries on tbl8 for which the
1628                  * rule_to_delete must be removed or modified.
1629                  */
1630                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1631                         if (lpm->tbl8[i].depth <= depth)
1632                                 lpm->tbl8[i].valid = INVALID;
1633                 }
1634         } else {
1635                 /* Set new tbl8 entry. */
1636                 struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
1637                         .valid = VALID,
1638                         .depth = sub_rule_depth,
1639                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1640                 };
1641
1642                 new_tbl8_entry.next_hop =
1643                                 lpm->rules_tbl[sub_rule_index].next_hop;
1644                 /*
1645                  * Loop through the range of entries on tbl8 for which the
1646                  * rule_to_delete must be modified.
1647                  */
1648                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1649                         if (lpm->tbl8[i].depth <= depth)
1650                                 lpm->tbl8[i] = new_tbl8_entry;
1651                 }
1652         }
1653
1654         /*
1655          * Check if there are any valid entries in this tbl8 group. If all
1656          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1657          * associated tbl24 entry.
1658          */
1659
1660         tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
1661
1662         if (tbl8_recycle_index == -EINVAL) {
1663                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1664                  * Prevent the free of the tbl8 group from being hoisted.
1665                  */
1666                 lpm->tbl24[tbl24_index].valid = 0;
1667                 __atomic_thread_fence(__ATOMIC_RELEASE);
1668                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1669         } else if (tbl8_recycle_index > -1) {
1670                 /* Update tbl24 entry. */
1671                 struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
1672                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1673                         .valid = VALID,
1674                         .valid_group = 0,
1675                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1676                 };
1677
1678                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1679                  * Prevent the free of the tbl8 group from being hoisted.
1680                  */
1681                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1682                 __atomic_thread_fence(__ATOMIC_RELEASE);
1683                 tbl8_free_v20(lpm->tbl8, tbl8_group_start);
1684         }
1685
1686         return 0;
1687 }
1688
1689 static int32_t
1690 delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
1691         uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1692 {
1693 #define group_idx next_hop
1694         uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1695                         tbl8_range, i;
1696         int32_t tbl8_recycle_index;
1697
1698         /*
1699          * Calculate the index into tbl24 and range. Note: All depths larger
1700          * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1701          */
1702         tbl24_index = ip_masked >> 8;
1703
1704         /* Calculate the index into tbl8 and range. */
1705         tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
1706         tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1707         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1708         tbl8_range = depth_to_range(depth);
1709
1710         if (sub_rule_index < 0) {
1711                 /*
1712                  * Loop through the range of entries on tbl8 for which the
1713                  * rule_to_delete must be removed or modified.
1714                  */
1715                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1716                         if (lpm->tbl8[i].depth <= depth)
1717                                 lpm->tbl8[i].valid = INVALID;
1718                 }
1719         } else {
1720                 /* Set new tbl8 entry. */
1721                 struct rte_lpm_tbl_entry new_tbl8_entry = {
1722                         .valid = VALID,
1723                         .depth = sub_rule_depth,
1724                         .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
1725                         .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
1726                 };
1727
1728                 /*
1729                  * Loop through the range of entries on tbl8 for which the
1730                  * rule_to_delete must be modified.
1731                  */
1732                 for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1733                         if (lpm->tbl8[i].depth <= depth)
1734                                 lpm->tbl8[i] = new_tbl8_entry;
1735                 }
1736         }
1737
1738         /*
1739          * Check if there are any valid entries in this tbl8 group. If all
1740          * tbl8 entries are invalid we can free the tbl8 and invalidate the
1741          * associated tbl24 entry.
1742          */
1743
1744         tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
1745
1746         if (tbl8_recycle_index == -EINVAL) {
1747                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1748                  * Prevent the free of the tbl8 group from being hoisted.
1749                  */
1750                 lpm->tbl24[tbl24_index].valid = 0;
1751                 __atomic_thread_fence(__ATOMIC_RELEASE);
1752                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1753         } else if (tbl8_recycle_index > -1) {
1754                 /* Update tbl24 entry. */
1755                 struct rte_lpm_tbl_entry new_tbl24_entry = {
1756                         .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1757                         .valid = VALID,
1758                         .valid_group = 0,
1759                         .depth = lpm->tbl8[tbl8_recycle_index].depth,
1760                 };
1761
1762                 /* Set tbl24 before freeing tbl8 to avoid race condition.
1763                  * Prevent the free of the tbl8 group from being hoisted.
1764                  */
1765                 lpm->tbl24[tbl24_index] = new_tbl24_entry;
1766                 __atomic_thread_fence(__ATOMIC_RELEASE);
1767                 tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
1768         }
1769 #undef group_idx
1770         return 0;
1771 }
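/*
 * The ordering used in both delete_depth_big variants above is the point of
 * this change: the tbl24 entry is updated first, then a release fence keeps
 * the tbl8_free() call from being hoisted ahead of that store. In outline:
 *
 *	lpm->tbl24[tbl24_index] = new_tbl24_entry;    // steer readers away
 *	__atomic_thread_fence(__ATOMIC_RELEASE);      // order the two steps
 *	tbl8_free_v1604(lpm->tbl8, tbl8_group_start); // group may be reused
 *
 * Without the fence, the compiler or CPU could make the group available for
 * reuse while concurrent lookups still follow the stale tbl24 group index
 * into it.
 */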
1772
1773 /*
1774  * Deletes a rule
1775  */
1776 int
1777 rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
1778 {
1779         int32_t rule_to_delete_index, sub_rule_index;
1780         uint32_t ip_masked;
1781         uint8_t sub_rule_depth;
1782         /*
1783          * Check input arguments. Note: IP is an unsigned 32-bit integer, so
1784          * any value is in range and it need not be checked.
1785          */
1786         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1787                 return -EINVAL;
1788         }
1789
1790         ip_masked = ip & depth_to_mask(depth);
1791
1792         /*
1793          * Find the index of the input rule, that needs to be deleted, in the
1794          * rule table.
1795          */
1796         rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
1797
1798         /*
1799          * Check if rule_to_delete_index was found. If no rule was found the
1800          * function rule_find returns -EINVAL.
1801          */
1802         if (rule_to_delete_index < 0)
1803                 return -EINVAL;
1804
1805         /* Delete the rule from the rule table. */
1806         rule_delete_v20(lpm, rule_to_delete_index, depth);
1807
1808         /*
1809          * Find rule to replace the rule_to_delete. If there is no rule to
1810          * replace it, find_previous_rule returns -1 and the table entries
1811          * associated with this rule are invalidated.
1812          */
1813         sub_rule_depth = 0;
1814         sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
1815
1816         /*
1817          * If the input depth value is 24 or less use delete_depth_small,
1818          * otherwise use delete_depth_big.
1819          */
1820         if (depth <= MAX_DEPTH_TBL24) {
1821                 return delete_depth_small_v20(lpm, ip_masked, depth,
1822                                 sub_rule_index, sub_rule_depth);
1823         } else { /* If depth > MAX_DEPTH_TBL24 */
1824                 return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
1825                                 sub_rule_depth);
1826         }
1827 }
1828 VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
1829
1830 int
1831 rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1832 {
1833         int32_t rule_to_delete_index, sub_rule_index;
1834         uint32_t ip_masked;
1835         uint8_t sub_rule_depth;
1836         /*
1837          * Check input arguments. Note: IP is an unsigned 32-bit integer, so
1838          * any value is in range and it need not be checked.
1839          */
1840         if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1841                 return -EINVAL;
1842         }
1843
1844         ip_masked = ip & depth_to_mask(depth);
1845
1846         /*
1847          * Find the index of the input rule, that needs to be deleted, in the
1848          * rule table.
1849          */
1850         rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
1851
1852         /*
1853          * Check if rule_to_delete_index was found. If no rule was found the
1854          * function rule_find returns -EINVAL.
1855          */
1856         if (rule_to_delete_index < 0)
1857                 return -EINVAL;
1858
1859         /* Delete the rule from the rule table. */
1860         rule_delete_v1604(lpm, rule_to_delete_index, depth);
1861
1862         /*
1863          * Find rule to replace the rule_to_delete. If there is no rule to
1864          * replace it, find_previous_rule returns -1 and the table entries
1865          * associated with this rule are invalidated.
1866          */
1867         sub_rule_depth = 0;
1868         sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
1869
1870         /*
1871          * If the input depth value is 24 or less use delete_depth_small,
1872          * otherwise use delete_depth_big.
1873          */
1874         if (depth <= MAX_DEPTH_TBL24) {
1875                 return delete_depth_small_v1604(lpm, ip_masked, depth,
1876                                 sub_rule_index, sub_rule_depth);
1877         } else { /* If depth > MAX_DEPTH_TBL24 */
1878                 return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
1879                                 sub_rule_depth);
1880         }
1881 }
1882 BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
1883 MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
1884                 uint8_t depth), rte_lpm_delete_v1604);
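/*
 * Illustrative control-plane use of the delete API (a sketch: the table name
 * and the route are made up, and error handling is trimmed):
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *	uint32_t ip = (uint32_t)10 << 24;	// 10.0.0.0 in host byte order
 *
 *	if (lpm != NULL && rte_lpm_delete(lpm, ip, 8) < 0) {
 *		// -EINVAL: bad arguments, or no 10.0.0.0/8 rule in the table
 *	}
 */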
1885
1886 /*
1887  * Delete all rules from the LPM table.
1888  */
1889 void
1890 rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
1891 {
1892         /* Zero rule information. */
1893         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1894
1895         /* Zero tbl24. */
1896         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1897
1898         /* Zero tbl8. */
1899         memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
1900
1901         /* Delete all rules from the rules table. */
1902         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1903 }
1904 VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
1905
1906 void
1907 rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
1908 {
1909         /* Zero rule information. */
1910         memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1911
1912         /* Zero tbl24. */
1913         memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1914
1915         /* Zero tbl8. */
1916         memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1917                         * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1918
1919         /* Delete all rules from the rules table. */
1920         memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1921 }
1922 BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
1923 MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
1924                 rte_lpm_delete_all_v1604);
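/*
 * rte_lpm_delete_all() clears every rule and table entry but leaves the LPM
 * structure allocated, so a control plane can flush and repopulate a table
 * without a free/create cycle. A minimal sketch (the route array is
 * hypothetical):
 *
 *	rte_lpm_delete_all(lpm);
 *	for (i = 0; i < n_routes; i++)
 *		if (rte_lpm_add(lpm, routes[i].ip, routes[i].depth,
 *				routes[i].next_hop) < 0)
 *			break;
 *
 * As with the other control-plane calls in this file, there is no internal
 * locking here, so multiple writers need external synchronisation.
 */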