/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_lpm.h>

#include "rte_table_lpm.h"
46 #define RTE_TABLE_LPM_MAX_NEXT_HOPS 256
48 struct rte_table_lpm {
49 /* Input parameters */
51 uint32_t entry_unique_size;
55 /* Handle to low-level LPM table */
58 /* Next Hop Table (NHT) */
59 uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
60 uint8_t nht[0] __rte_cache_aligned;
64 rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
66 struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
67 struct rte_table_lpm *lpm;
68 uint32_t total_size, nht_size;
70 /* Check input parameters */
72 RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
75 if (p->n_rules == 0) {
76 RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
79 if (p->entry_unique_size == 0) {
80 RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
84 if (p->entry_unique_size > entry_size) {
85 RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
89 if ((p->offset & 0x3) != 0) {
90 RTE_LOG(ERR, TABLE, "%s: Invalid offset\n", __func__);
94 entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
96 /* Memory allocation */
97 nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
98 total_size = sizeof(struct rte_table_lpm) + nht_size;
99 lpm = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE,
103 "%s: Cannot allocate %u bytes for LPM table\n",
104 __func__, total_size);
108 /* LPM low-level table creation */
109 lpm->lpm = rte_lpm_create("LPM", socket_id, p->n_rules, 0);
110 if (lpm->lpm == NULL) {
112 RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
116 /* Memory initialization */
117 lpm->entry_size = entry_size;
118 lpm->entry_unique_size = p->entry_unique_size;
119 lpm->n_rules = p->n_rules;
120 lpm->offset = p->offset;
126 rte_table_lpm_free(void *table)
128 struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
130 /* Check input parameters */
132 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
136 /* Free previously allocated resources */
137 rte_lpm_free(lpm->lpm);
144 nht_find_free(struct rte_table_lpm *lpm, uint32_t *pos)
148 for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
149 if (lpm->nht_users[i] == 0) {
159 nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
163 for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
164 uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
166 if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
167 lpm->entry_unique_size) == 0)) {
177 rte_table_lpm_entry_add(
184 struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
185 struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
186 uint32_t nht_pos, nht_pos0_valid;
190 /* Check input parameters */
192 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
195 if (ip_prefix == NULL) {
196 RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
201 RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
205 if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
206 RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n",
207 __func__, ip_prefix->depth);
211 /* Check if rule is already present in the table */
212 status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
213 ip_prefix->depth, &nht_pos0);
214 nht_pos0_valid = status > 0;
216 /* Find existing or free NHT entry */
217 if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
220 if (nht_find_free(lpm, &nht_pos) == 0) {
221 RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
225 nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
226 memcpy(nht_entry, entry, lpm->entry_size);
229 /* Add rule to low level LPM table */
230 if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
231 (uint8_t) nht_pos) < 0) {
232 RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
236 /* Commit NHT changes */
237 lpm->nht_users[nht_pos]++;
238 lpm->nht_users[nht_pos0] -= nht_pos0_valid;
240 *key_found = nht_pos0_valid;
241 *entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
246 rte_table_lpm_entry_delete(
252 struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
253 struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
257 /* Check input parameters */
259 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
262 if (ip_prefix == NULL) {
263 RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
267 if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
268 RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
273 /* Return if rule is not present in the table */
274 status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
275 ip_prefix->depth, &nht_pos);
277 RTE_LOG(ERR, TABLE, "%s: LPM algorithmic error\n", __func__);
285 /* Delete rule from the low-level LPM table */
286 status = rte_lpm_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
288 RTE_LOG(ERR, TABLE, "%s: LPM rule delete failed\n", __func__);
292 /* Commit NHT changes */
293 lpm->nht_users[nht_pos]--;
297 memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
304 rte_table_lpm_lookup(
306 struct rte_mbuf **pkts,
308 uint64_t *lookup_hit_mask,
311 struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
312 uint64_t pkts_out_mask = 0;
316 for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
317 __builtin_clzll(pkts_mask)); i++) {
318 uint64_t pkt_mask = 1LLU << i;
320 if (pkt_mask & pkts_mask) {
321 struct rte_mbuf *pkt = pkts[i];
322 uint32_t ip = rte_bswap32(
323 RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
327 status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
329 pkts_out_mask |= pkt_mask;
330 entries[i] = (void *) &lpm->nht[nht_pos *
336 *lookup_hit_mask = pkts_out_mask;
341 struct rte_table_ops rte_table_lpm_ops = {
342 .f_create = rte_table_lpm_create,
343 .f_free = rte_table_lpm_free,
344 .f_add = rte_table_lpm_entry_add,
345 .f_delete = rte_table_lpm_entry_delete,
346 .f_lookup = rte_table_lpm_lookup,