/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_prefetch.h>
#include <rte_cycles.h>
#include <rte_acl.h>

#include "rte_swx_table_wm.h"
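/* Environment memory allocation: when RTE_SWX_TABLE_EM_USE_HUGE_PAGES is
 * non-zero, table memory comes from the DPDK heap (rte_zmalloc_socket on the
 * requested NUMA node, typically backed by huge pages); otherwise it falls
 * back to libnuma allocations.
 */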
#ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
#define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
#endif

#if RTE_SWX_TABLE_EM_USE_HUGE_PAGES
#include <rte_malloc.h>

static void *
env_malloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}
#else

#include <numa.h>

static void *
env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	return numa_alloc_onnode(size, numa_node);
}

static void
env_free(void *start, size_t size)
{
	numa_free(start, size);
}

#endif
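/* Build a practically unique name for the ACL context by writing the current
 * CPU timestamp counter into a freshly allocated character buffer.
 */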
static char *get_unique_name(void)

	tsc = (uint64_t *) name;
	*tsc = rte_get_tsc_cycles();
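/* Count the entries in a table entry list. */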
static uint32_t
count_entries(struct rte_swx_table_entry_list *entries)
{
	struct rte_swx_table_entry *entry;
	uint32_t n_entries = 0;

	if (!entries)
		return 0;

	TAILQ_FOREACH(entry, entries, node)
		n_entries++;

	return n_entries;
}
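/* Translate the generic table parameters into an ACL build configuration:
 * a single category, with the key covered by RTE_ACL_FIELD_TYPE_BITMASK
 * fields (a 1-byte first field followed by 4-byte fields). The table-level
 * mask key_mask0, when present, is consulted per key byte; field offsets are
 * expressed as key_offset plus the byte position within the key.
 */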
acl_table_cfg_get(struct rte_acl_config *cfg, struct rte_swx_table_params *p)
	uint32_t byte_id = 0, field_id = 0;

	/* cfg->num_categories. */
	cfg->num_categories = 1;

	/* cfg->defs and cfg->num_fields. */
	for (byte_id = 0; byte_id < p->key_size; ) {
		uint32_t field_size = field_id ? 4 : 1;
		uint8_t byte = p->key_mask0 ? p->key_mask0[byte_id] : 0xFF;

		if (field_id == RTE_ACL_MAX_FIELDS)

		cfg->defs[field_id].type = RTE_ACL_FIELD_TYPE_BITMASK;
		cfg->defs[field_id].size = field_size;
		cfg->defs[field_id].field_index = field_id;
		cfg->defs[field_id].input_index = field_id;
		cfg->defs[field_id].offset = p->key_offset + byte_id;

		byte_id += field_size;

	cfg->num_fields = field_id;
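/* Value and mask of a 1-byte ACL rule field: the value is the key byte at
 * the given offset; the mask is derived from the table-level mask (key_mask0)
 * and the per-rule mask, each defaulting to 0xFF when absent.
 */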
acl_table_rule_field8(uint8_t *value,

	km0 = key_mask0 ? key_mask0[offset] : 0xFF;
	km = key_mask ? key_mask[offset] : 0xFF;

	*value = key[offset];
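/* Value and mask of a 4-byte ACL rule field, assembled big endian (byte 0 is
 * the MSB). Each mask byte is the AND of the per-rule mask and the table-level
 * mask, both defaulting to 0xFF when absent; key bytes beyond key_size
 * contribute nothing.
 */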
acl_table_rule_field32(uint32_t *value,

	uint32_t km0[4], km[4], k[4];

	/* Byte 0 = MSB, byte 3 = LSB. */
	for (byte_id = 0; byte_id < 4; byte_id++) {
		if (offset + byte_id >= key_size) {
			km0[byte_id] = 0;
			km[byte_id] = 0;
			k[byte_id] = 0;
			continue;
		}
		km0[byte_id] = key_mask0 ? key_mask0[offset + byte_id] : 0xFF;
		km[byte_id] = key_mask ? key_mask[offset + byte_id] : 0xFF;
		k[byte_id] = key[offset + byte_id];
	}

	*value = (k[0] << 24) |
		 (k[1] << 16) |
		 (k[2] << 8) |
		 k[3];

	*mask = ((km[0] & km0[0]) << 24) |
		((km[1] & km0[1]) << 16) |
		((km[2] & km0[2]) << 8) |
		(km[3] & km0[3]);
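/* ACL rule type sized for the maximum number of fields. */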
RTE_ACL_RULE_DEF(acl_rule, RTE_ACL_MAX_FIELDS);
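/* Convert the table entry list into an array of ACL rules. Every rule uses
 * category 0 only (category_mask = 1), gets a priority derived from
 * RTE_ACL_MAX_PRIORITY, and carries a user data of rule_id + 1 so that the
 * value 0 stays reserved for lookup miss. Field values and masks come from
 * the entry key and masks through the helpers above.
 */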
static struct rte_acl_rule *
acl_table_rules_get(struct rte_acl_config *acl_cfg,
	struct rte_swx_table_params *p,
	struct rte_swx_table_entry_list *entries,

	struct rte_swx_table_entry *entry;
	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
	uint32_t n_fields = acl_cfg->num_fields;

	memory = malloc(n_entries * acl_rule_size);

	TAILQ_FOREACH(entry, entries, node) {
		uint8_t *m = &memory[rule_id * acl_rule_size];
		struct acl_rule *acl_rule = (struct acl_rule *)m;

		acl_rule->data.category_mask = 1;
		acl_rule->data.priority = RTE_ACL_MAX_PRIORITY -
		acl_rule->data.userdata = rule_id + 1;

		for (field_id = 0; field_id < n_fields; field_id++) {
			struct rte_acl_field *f = &acl_rule->field[field_id];
			uint32_t size = acl_cfg->defs[field_id].size;
			uint32_t offset = acl_cfg->defs[field_id].offset -

				acl_table_rule_field8(&value,

				f->mask_range.u8 = mask;

				uint32_t value, mask;

				acl_table_rule_field32(&value,

				f->value.u32 = value;
				f->mask_range.u32 = mask;

	return (struct rte_acl_rule *)memory;
/* When the table to be created has no rules, the expected behavior is to always
 * get lookup miss for any input key. To achieve this, we add a single bogus
 * rule to the table with the rule user data set to 0, i.e. the value returned
 * when lookup miss takes place. Whether lookup hit (the bogus rule is hit) or
 * miss, a user data of 0 is returned, which for the ACL library is equivalent
 * to lookup miss.
 */
static struct rte_acl_rule *
acl_table_rules_default_get(struct rte_acl_config *acl_cfg)

	struct rte_acl_rule *acl_rule;
	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);

	acl_rule = calloc(1, acl_rule_size);

	acl_rule->data.category_mask = 1;
	acl_rule->data.priority = RTE_ACL_MAX_PRIORITY;
	acl_rule->data.userdata = 0;

	memset(&acl_rule[1], 0xFF, acl_rule_size - sizeof(struct rte_acl_rule));
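/* Create and build the ACL context for the table: derive the ACL config from
 * the table parameters, convert the entries to ACL rules (or install the
 * single default rule above when the table is empty), then create the context
 * on the given NUMA node, add the rules and build the runtime tries.
 */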
static struct rte_acl_ctx *
acl_table_create(struct rte_swx_table_params *params,
	struct rte_swx_table_entry_list *entries,

	struct rte_acl_param acl_params = {0};
	struct rte_acl_config acl_cfg = {0};
	struct rte_acl_ctx *acl_ctx = NULL;
	struct rte_acl_rule *acl_rules = NULL;

	/* ACL config data structures. */
	name = get_unique_name();

	status = acl_table_cfg_get(&acl_cfg, params);

	acl_rules = n_entries ?
		acl_table_rules_get(&acl_cfg, params, entries, n_entries) :
		acl_table_rules_default_get(&acl_cfg);

	n_entries = n_entries ? n_entries : 1;

	acl_params.name = name;
	acl_params.socket_id = numa_node;
	acl_params.rule_size = RTE_ACL_RULE_SZ(acl_cfg.num_fields);
	acl_params.max_rule_num = n_entries;

	acl_ctx = rte_acl_create(&acl_params);

	status = rte_acl_add_rules(acl_ctx, acl_rules, n_entries);

	status = rte_acl_build(acl_ctx, &acl_cfg);

	if (status && acl_ctx)
		rte_acl_free(acl_ctx);

	return status ? NULL : acl_ctx;
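/* Pack the action data of all entries into a flat array: for each entry,
 * 8 bytes of action_id followed by its action data, entry_data_size bytes
 * per entry.
 */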
entry_data_copy(uint8_t *data,
	struct rte_swx_table_entry_list *entries,
	uint32_t entry_data_size)

	struct rte_swx_table_entry *entry;

	TAILQ_FOREACH(entry, entries, node) {
		uint64_t *d = (uint64_t *)&data[i * entry_data_size];

		d[0] = entry->action_id;
		memcpy(&d[1], entry->action_data, entry_data_size - 8);
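/* Run-time table object: the built ACL context followed by the packed
 * per-entry action data.
 */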
struct table {
	struct rte_acl_ctx *acl_ctx;
	uint8_t *data;
	size_t total_size;
	uint32_t entry_data_size;
};
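/* Free the ACL context and the table memory. */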
table_free(void *table)
	struct table *t = table;

	rte_acl_free(t->acl_ctx);
	env_free(t, t->total_size);
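/* Create the wildcard match table: validate the parameters, allocate the
 * table metadata and the packed action data area as one block, build the ACL
 * context from the entry list, then copy each entry's action data.
 */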
table_create(struct rte_swx_table_params *params,
	struct rte_swx_table_entry_list *entries,
	const char *args __rte_unused,

	struct table *t = NULL;
	size_t meta_sz, data_sz, total_size;
	uint32_t entry_data_size;
	uint32_t n_entries = count_entries(entries);

	/* Check input arguments. */
	if (!params || !params->key_size)

	/* Memory allocation and initialization. */
	entry_data_size = 8 + params->action_data_size;
	meta_sz = sizeof(struct table);
	data_sz = n_entries * entry_data_size;
	total_size = meta_sz + data_sz;

	t = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);

	memset(t, 0, total_size);
	t->entry_data_size = entry_data_size;
	t->total_size = total_size;
	t->data = (uint8_t *)&t[1];

	t->acl_ctx = acl_table_create(params, entries, n_entries, numa_node);

	entry_data_copy(t->data, entries, n_entries, entry_data_size);
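/* Size of the mailbox (per-thread lookup state) required by this table type. */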
table_mailbox_size_get(void)
	return sizeof(struct mailbox);
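/* Single-shot lookup: classify one key through the ACL context. A user data
 * of 0 means lookup miss; otherwise user_data - 1 indexes the packed action
 * data array, whose first 8 bytes are the action_id and the rest the action
 * data.
 */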
table_lookup(void *table,
	void *mailbox __rte_unused,

	uint8_t **action_data,

	struct table *t = table;

	rte_acl_classify(t->acl_ctx, key, &user_data, 1, 1);

	data = &t->data[(user_data - 1) * t->entry_data_size];
	*action_id = ((uint64_t *)data)[0];
	*action_data = &data[8];
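/* Table operations exported to the SWX pipeline, which binds a table type to
 * these callbacks and drives it through them. Illustrative use (a sketch
 * only; the authoritative prototypes are in rte_swx_table.h):
 *
 *	void *t = rte_swx_table_wildcard_match_ops.create(params, entries,
 *		NULL, numa_node);
 *	void *mbox = calloc(1, rte_swx_table_wildcard_match_ops.mailbox_size_get());
 *	... per lookup: .lkp(t, mbox, &key, &action_id, &action_data, &hit) ...
 *	rte_swx_table_wildcard_match_ops.free(t);
 */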
struct rte_swx_table_ops rte_swx_table_wildcard_match_ops = {
	.footprint_get = NULL,
	.mailbox_size_get = table_mailbox_size_get,
	.create = table_create,
	.lkp = (rte_swx_table_lookup_t)table_lookup,
	.free = table_free,
};