/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_prefetch.h>
#include <rte_cycles.h>
#include <rte_acl.h>

#include "rte_swx_table_wm.h"
#ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
#define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
#endif

#if RTE_SWX_TABLE_EM_USE_HUGE_PAGES

#include <rte_malloc.h>

static void *
env_malloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

static void *
env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	return numa_alloc_onnode(size, numa_node);
}

static void
env_free(void *start, size_t size)
{
	numa_free(start, size);
}

#endif

static char *get_unique_name(void)
{
	uint64_t tsc = rte_get_tsc_cycles();
	size_t size = sizeof(uint64_t) * 2 + 1;
	char *name = calloc(1, size);

	if (!name)
		return NULL;

	/* 16 hex digits plus the NUL terminator. */
	snprintf(name, size, "%016" PRIx64, tsc);

	return name;
}

static uint32_t
count_entries(struct rte_swx_table_entry_list *entries)
{
	struct rte_swx_table_entry *entry;
	uint32_t n_entries = 0;

	if (!entries)
		return 0;

	TAILQ_FOREACH(entry, entries, node)
		n_entries++;

	return n_entries;
}

static int
acl_table_cfg_get(struct rte_acl_config *cfg, struct rte_swx_table_params *p)
{
	uint32_t byte_id = 0, field_id = 0;

	/* cfg->num_categories. */
	cfg->num_categories = 1;

	/* cfg->defs and cfg->num_fields. */
	for (byte_id = 0; byte_id < p->key_size; ) {
		/* The ACL library requires the first rule field to be exactly
		 * one byte long; every other field is four bytes long here.
		 */
		uint32_t field_size = field_id ? 4 : 1;
		uint8_t byte = p->key_mask0 ? p->key_mask0[byte_id] : 0xFF;

		/* Skip key bytes that all the table rules mask out. */
		if (!byte) {
			byte_id++;
			continue;
		}

		if (field_id == RTE_ACL_MAX_FIELDS)
			return -1;

		cfg->defs[field_id].type = RTE_ACL_FIELD_TYPE_BITMASK;
		cfg->defs[field_id].size = field_size;
		cfg->defs[field_id].field_index = field_id;
		cfg->defs[field_id].input_index = field_id;
		cfg->defs[field_id].offset = p->key_offset + byte_id;

		field_id++;
		byte_id += field_size;
	}

	if (!field_id)
		return -1;

	cfg->num_fields = field_id;

	/* cfg->max_size. */
	cfg->max_size = 0;

	return 0;
}
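
/*
 * Illustrative sketch (not part of the build): field layout produced by
 * acl_table_cfg_get() for a hypothetical 8-byte key with key_offset = 0 and
 * key_mask0 = FF FF FF FF 00 00 FF FF:
 *
 *	field 0: offset 0, size 1 (the mandatory one-byte first field)
 *	field 1: offset 1, size 4 (key bytes 1-4)
 *	field 2: offset 6, size 4 (key bytes 6-9; byte 5 is skipped, while
 *	         bytes 8-9 fall outside the key and get zeroed later by
 *	         acl_table_rule_field32())
 */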

static void
acl_table_rule_field8(uint8_t *value,
	uint8_t *mask,
	uint8_t *key_mask0,
	uint8_t *key_mask,
	uint8_t *key,
	uint32_t offset)
{
	uint8_t km0, km;

	km0 = key_mask0 ? key_mask0[offset] : 0xFF;
	km = key_mask ? key_mask[offset] : 0xFF;

	*value = key[offset];
	*mask = km0 & km;
}

static void
acl_table_rule_field32(uint32_t *value,
	uint32_t *mask,
	uint8_t *key_mask0,
	uint8_t *key_mask,
	uint8_t *key,
	uint32_t key_size,
	uint32_t offset)
{
	uint32_t km0[4], km[4], k[4];
	uint32_t byte_id;

	/* Byte 0 = MSB, byte 3 = LSB. */
	for (byte_id = 0; byte_id < 4; byte_id++) {
		/* Bytes past the end of the key match nothing. */
		if (offset + byte_id >= key_size) {
			km0[byte_id] = 0;
			km[byte_id] = 0;
			k[byte_id] = 0;
			continue;
		}

		km0[byte_id] = key_mask0 ? key_mask0[offset + byte_id] : 0xFF;
		km[byte_id] = key_mask ? key_mask[offset + byte_id] : 0xFF;
		k[byte_id] = key[offset + byte_id];
	}

	*value = (k[0] << 24) |
		(k[1] << 16) |
		(k[2] << 8) |
		k[3];

	*mask = ((km[0] & km0[0]) << 24) |
		((km[1] & km0[1]) << 16) |
		((km[2] & km0[2]) << 8) |
		(km[3] & km0[3]);
}
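
/*
 * Illustrative sketch (not part of the build): with hypothetical key bytes
 * { 0x0A, 0x0B, 0x0C, 0x0D } at offset 0 and all the mask bytes set to 0xFF,
 * acl_table_rule_field32() produces value = 0x0A0B0C0D and mask = 0xFFFFFFFF,
 * i.e. the four key bytes are packed big-endian, with byte 0 as the MSB.
 */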

/* ACL rule able to accommodate the maximum number of fields. */
RTE_ACL_RULE_DEF(acl_rule, RTE_ACL_MAX_FIELDS);

static struct rte_acl_rule *
acl_table_rules_get(struct rte_acl_config *acl_cfg,
	struct rte_swx_table_params *p,
	struct rte_swx_table_entry_list *entries,
	uint32_t n_entries)
{
	struct rte_swx_table_entry *entry;
	uint8_t *memory;
	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
	uint32_t n_fields = acl_cfg->num_fields;
	uint32_t rule_id;

	if (!n_entries)
		return NULL;

	memory = malloc(n_entries * acl_rule_size);
	if (!memory)
		return NULL;

	rule_id = 0;
	TAILQ_FOREACH(entry, entries, node) {
		uint8_t *m = &memory[rule_id * acl_rule_size];
		struct acl_rule *acl_rule = (struct acl_rule *)m;
		uint32_t field_id;

		acl_rule->data.category_mask = 1;

		/* A smaller key priority maps to a bigger ACL priority. */
		acl_rule->data.priority = RTE_ACL_MAX_PRIORITY -
			entry->key_priority;

		/* User data 0 is reserved for lookup miss. */
		acl_rule->data.userdata = rule_id + 1;

		for (field_id = 0; field_id < n_fields; field_id++) {
			struct rte_acl_field *f = &acl_rule->field[field_id];
			uint32_t size = acl_cfg->defs[field_id].size;
			uint32_t offset = acl_cfg->defs[field_id].offset -
				p->key_offset;

			if (size == 1) {
				uint8_t value, mask;

				acl_table_rule_field8(&value,
					&mask,
					p->key_mask0,
					entry->key_mask,
					entry->key,
					offset);

				f->value.u8 = value;
				f->mask_range.u8 = mask;
			} else {
				uint32_t value, mask;

				acl_table_rule_field32(&value,
					&mask,
					p->key_mask0,
					entry->key_mask,
					entry->key,
					p->key_size,
					offset);

				f->value.u32 = value;
				f->mask_range.u32 = mask;
			}
		}

		rule_id++;
	}

	return (struct rte_acl_rule *)memory;
}
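
/*
 * Illustrative sketch (not part of the build): for two hypothetical entries
 * with key_priority 0 and key_priority 10, the ACL rule priorities become
 * RTE_ACL_MAX_PRIORITY and RTE_ACL_MAX_PRIORITY - 10, respectively. The ACL
 * library treats the bigger value as the stronger priority, so the
 * key_priority 0 entry wins when both rules match the same key.
 */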

/* When the table to be created has no rules, the expected behavior is to
 * always get lookup miss for any input key. To achieve this, we add a single
 * bogus rule to the table with the rule user data set to 0, i.e. the value
 * returned when lookup miss takes place. Whether lookup hit (the bogus rule
 * is hit) or miss, a user data of 0 is returned, which for the ACL library is
 * equivalent to lookup miss.
 */
static struct rte_acl_rule *
acl_table_rules_default_get(struct rte_acl_config *acl_cfg)
{
	struct rte_acl_rule *acl_rule;
	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);

	acl_rule = calloc(1, acl_rule_size);
	if (!acl_rule)
		return NULL;

	acl_rule->data.category_mask = 1;
	acl_rule->data.priority = RTE_ACL_MAX_PRIORITY;
	acl_rule->data.userdata = 0;

	/* Set every rule field value and mask to all ones. */
	memset(&acl_rule[1], 0xFF, acl_rule_size - sizeof(struct rte_acl_rule));

	return acl_rule;
}

static struct rte_acl_ctx *
acl_table_create(struct rte_swx_table_params *params,
	struct rte_swx_table_entry_list *entries,
	uint32_t n_entries,
	int numa_node)
{
	struct rte_acl_param acl_params = {0};
	struct rte_acl_config acl_cfg = {0};
	struct rte_acl_ctx *acl_ctx = NULL;
	struct rte_acl_rule *acl_rules = NULL;
	char *name = NULL;
	int status = 0;

	/* ACL config data structures. */
	name = get_unique_name();
	if (!name) {
		status = -1;
		goto free_resources;
	}

	status = acl_table_cfg_get(&acl_cfg, params);
	if (status)
		goto free_resources;

	acl_rules = n_entries ?
		acl_table_rules_get(&acl_cfg, params, entries, n_entries) :
		acl_table_rules_default_get(&acl_cfg);
	if (!acl_rules) {
		status = -1;
		goto free_resources;
	}

	n_entries = n_entries ? n_entries : 1;

	/* ACL create. */
	acl_params.name = name;
	acl_params.socket_id = numa_node;
	acl_params.rule_size = RTE_ACL_RULE_SZ(acl_cfg.num_fields);
	acl_params.max_rule_num = n_entries;

	acl_ctx = rte_acl_create(&acl_params);
	if (!acl_ctx) {
		status = -1;
		goto free_resources;
	}

	/* ACL add rules. */
	status = rte_acl_add_rules(acl_ctx, acl_rules, n_entries);
	if (status)
		goto free_resources;

	/* ACL build. */
	status = rte_acl_build(acl_ctx, &acl_cfg);

free_resources:
	if (status && acl_ctx)
		rte_acl_free(acl_ctx);

	free(acl_rules);
	free(name);

	return status ? NULL : acl_ctx;
}

static void
entry_data_copy(uint8_t *data,
	struct rte_swx_table_entry_list *entries,
	uint32_t n_entries,
	uint32_t entry_data_size)
{
	struct rte_swx_table_entry *entry;
	uint32_t i = 0;

	if (!n_entries)
		return;

	TAILQ_FOREACH(entry, entries, node) {
		uint64_t *d = (uint64_t *)&data[i * entry_data_size];

		/* 8-byte action ID, followed by the action data. */
		d[0] = entry->action_id;
		memcpy(&d[1], entry->action_data, entry_data_size - 8);

		i++;
	}
}
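
/*
 * Illustrative sketch (not part of the build): per-entry data layout written
 * by entry_data_copy(), assuming a hypothetical action_data_size of 4 bytes
 * (entry_data_size = 12):
 *
 *	byte offset:	0 ......... 7	8 ......... 11
 *			[ action_id ]	[ action_data ]
 */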

struct table {
	struct rte_acl_ctx *acl_ctx;
	uint8_t *data;
	size_t total_size;
	uint32_t entry_data_size;
};

static void
table_free(void *table)
{
	struct table *t = table;

	if (!t)
		return;

	if (t->acl_ctx)
		rte_acl_free(t->acl_ctx);

	env_free(t, t->total_size);
}

static void *
table_create(struct rte_swx_table_params *params,
	struct rte_swx_table_entry_list *entries,
	const char *args __rte_unused,
	int numa_node)
{
	struct table *t = NULL;
	size_t meta_sz, data_sz, total_size;
	uint32_t entry_data_size;
	uint32_t n_entries = count_entries(entries);

	/* Check input arguments. */
	if (!params || !params->key_size)
		goto error;

	/* Memory allocation and initialization. */
	entry_data_size = 8 + params->action_data_size;
	meta_sz = sizeof(struct table);
	data_sz = n_entries * entry_data_size;
	total_size = meta_sz + data_sz;

	t = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
	if (!t)
		goto error;

	memset(t, 0, total_size);
	t->entry_data_size = entry_data_size;
	t->total_size = total_size;
	t->data = (uint8_t *)&t[1];

	t->acl_ctx = acl_table_create(params, entries, n_entries, numa_node);
	if (!t->acl_ctx)
		goto error;

	entry_data_copy(t->data, entries, n_entries, entry_data_size);

	return t;

error:
	table_free(t);
	return NULL;
}

/* The wildcard match lookup is done in a single step, so the lookup operation
 * keeps no state in the mailbox.
 */
struct mailbox {
};

static uint64_t
table_mailbox_size_get(void)
{
	return sizeof(struct mailbox);
}

static int
table_lookup(void *table,
	void *mailbox __rte_unused,
	uint8_t **key,
	uint64_t *action_id,
	uint8_t **action_data,
	int *hit)
{
	struct table *t = table;
	uint8_t *data;
	uint32_t user_data;

	rte_acl_classify(t->acl_ctx, (const uint8_t **)key, &user_data, 1, 1);

	/* User data 0 means lookup miss (the rule user data is rule_id + 1). */
	if (!user_data) {
		*hit = 0;
		return 1;
	}

	data = &t->data[(user_data - 1) * t->entry_data_size];
	*action_id = ((uint64_t *)data)[0];
	*action_data = &data[8];
	*hit = 1;

	/* The lookup operation is always completed in a single step. */
	return 1;
}

struct rte_swx_table_ops rte_swx_table_wildcard_match_ops = {
	.footprint_get = NULL,
	.mailbox_size_get = table_mailbox_size_get,
	.create = table_create,
	.add = NULL,
	.del = NULL,
	.lkp = (rte_swx_table_lookup_t)table_lookup,
	.free = table_free,
};
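
/*
 * Illustrative usage sketch (not part of the build). All the names below
 * ("params", "entries", "key_buf") are hypothetical; "params" and "entries"
 * are assumed to be already populated:
 *
 *	struct rte_swx_table_ops *ops = &rte_swx_table_wildcard_match_ops;
 *	uint8_t key_buf[16] = { ... }, *key = key_buf;
 *	uint64_t action_id;
 *	uint8_t *action_data;
 *	int hit;
 *
 *	void *table = ops->create(&params, &entries, NULL, 0);
 *	void *mailbox = calloc(1, ops->mailbox_size_get());
 *
 *	(void)ops->lkp(table, mailbox, &key, &action_id, &action_data, &hit);
 *	if (hit)
 *		... apply action_id / action_data ...
 *
 *	free(mailbox);
 *	ops->free(table);
 */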