/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdio.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_lpm.h>

#include "rte_table_lpm.h"
/* Maximum number of distinct next-hop entries the NHT can hold; may be
 * overridden at build time.
 */
#ifndef RTE_TABLE_LPM_MAX_NEXT_HOPS
#define RTE_TABLE_LPM_MAX_NEXT_HOPS 65536
#endif

/* Statistics macros compile to no-ops unless stats collection is enabled,
 * so the lookup fast path pays no cost by default.
 */
#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
34 struct rte_table_lpm {
35 struct rte_table_stats stats;
37 /* Input parameters */
39 uint32_t entry_unique_size;
43 /* Handle to low-level LPM table */
46 /* Next Hop Table (NHT) */
47 uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
48 uint8_t nht[0] __rte_cache_aligned;
52 rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
54 struct rte_table_lpm_params *p = params;
55 struct rte_table_lpm *lpm;
56 struct rte_lpm_config lpm_config;
58 uint32_t total_size, nht_size;
60 /* Check input parameters */
62 RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
65 if (p->n_rules == 0) {
66 RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
69 if (p->number_tbl8s == 0) {
70 RTE_LOG(ERR, TABLE, "%s: Invalid number_tbl8s\n", __func__);
73 if (p->entry_unique_size == 0) {
74 RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
78 if (p->entry_unique_size > entry_size) {
79 RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
83 if (p->name == NULL) {
84 RTE_LOG(ERR, TABLE, "%s: Table name is NULL\n",
88 entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
90 /* Memory allocation */
91 nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
92 total_size = sizeof(struct rte_table_lpm) + nht_size;
93 lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
97 "%s: Cannot allocate %u bytes for LPM table\n",
98 __func__, total_size);
102 /* LPM low-level table creation */
103 lpm_config.max_rules = p->n_rules;
104 lpm_config.number_tbl8s = p->number_tbl8s;
105 lpm_config.flags = p->flags;
106 lpm->lpm = rte_lpm_create(p->name, socket_id, &lpm_config);
108 if (lpm->lpm == NULL) {
110 RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
114 /* Memory initialization */
115 lpm->entry_size = entry_size;
116 lpm->entry_unique_size = p->entry_unique_size;
117 lpm->n_rules = p->n_rules;
118 lpm->offset = p->offset;
124 rte_table_lpm_free(void *table)
126 struct rte_table_lpm *lpm = table;
128 /* Check input parameters */
130 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
134 /* Free previously allocated resources */
135 rte_lpm_free(lpm->lpm);
142 nht_find_free(struct rte_table_lpm *lpm, uint32_t *pos)
146 for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
147 if (lpm->nht_users[i] == 0) {
157 nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
161 for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
162 uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
164 if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
165 lpm->entry_unique_size) == 0)) {
175 rte_table_lpm_entry_add(
182 struct rte_table_lpm *lpm = table;
183 struct rte_table_lpm_key *ip_prefix = key;
184 uint32_t nht_pos, nht_pos0_valid;
186 uint32_t nht_pos0 = 0;
188 /* Check input parameters */
190 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
193 if (ip_prefix == NULL) {
194 RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
199 RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
203 if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
204 RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n",
205 __func__, ip_prefix->depth);
209 /* Check if rule is already present in the table */
210 status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
211 ip_prefix->depth, &nht_pos0);
212 nht_pos0_valid = status > 0;
214 /* Find existing or free NHT entry */
215 if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
218 if (nht_find_free(lpm, &nht_pos) == 0) {
219 RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
223 nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
224 memcpy(nht_entry, entry, lpm->entry_size);
227 /* Add rule to low level LPM table */
228 if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth, nht_pos) < 0) {
229 RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
233 /* Commit NHT changes */
234 lpm->nht_users[nht_pos]++;
235 lpm->nht_users[nht_pos0] -= nht_pos0_valid;
237 *key_found = nht_pos0_valid;
238 *entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
243 rte_table_lpm_entry_delete(
249 struct rte_table_lpm *lpm = table;
250 struct rte_table_lpm_key *ip_prefix = key;
254 /* Check input parameters */
256 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
259 if (ip_prefix == NULL) {
260 RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
264 if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
265 RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
270 /* Return if rule is not present in the table */
271 status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
272 ip_prefix->depth, &nht_pos);
274 RTE_LOG(ERR, TABLE, "%s: LPM algorithmic error\n", __func__);
282 /* Delete rule from the low-level LPM table */
283 status = rte_lpm_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
285 RTE_LOG(ERR, TABLE, "%s: LPM rule delete failed\n", __func__);
289 /* Commit NHT changes */
290 lpm->nht_users[nht_pos]--;
294 memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
301 rte_table_lpm_lookup(
303 struct rte_mbuf **pkts,
305 uint64_t *lookup_hit_mask,
308 struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
309 uint64_t pkts_out_mask = 0;
312 __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
313 RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in);
316 for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
317 __builtin_clzll(pkts_mask)); i++) {
318 uint64_t pkt_mask = 1LLU << i;
320 if (pkt_mask & pkts_mask) {
321 struct rte_mbuf *pkt = pkts[i];
322 uint32_t ip = rte_bswap32(
323 RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
327 status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
329 pkts_out_mask |= pkt_mask;
330 entries[i] = (void *) &lpm->nht[nht_pos *
336 *lookup_hit_mask = pkts_out_mask;
337 RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - __builtin_popcountll(pkts_out_mask));
342 rte_table_lpm_stats_read(void *table, struct rte_table_stats *stats, int clear)
344 struct rte_table_lpm *t = table;
347 memcpy(stats, &t->stats, sizeof(t->stats));
350 memset(&t->stats, 0, sizeof(t->stats));
355 struct rte_table_ops rte_table_lpm_ops = {
356 .f_create = rte_table_lpm_create,
357 .f_free = rte_table_lpm_free,
358 .f_add = rte_table_lpm_entry_add,
359 .f_delete = rte_table_lpm_entry_delete,
361 .f_delete_bulk = NULL,
362 .f_lookup = rte_table_lpm_lookup,
363 .f_stats = rte_table_lpm_stats_read,