acl: new library
[dpdk.git] / lib / librte_acl / rte_acl.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_acl.h>
35 #include "acl.h"
36
/* Width of object x in bits (CHAR_BIT bits per byte). */
#define BIT_SIZEOF(x)   (sizeof(x) * CHAR_BIT)

/* Head type for the EAL tail queue that links all ACL contexts. */
TAILQ_HEAD(rte_acl_list, rte_acl_ctx);
40
41 struct rte_acl_ctx *
42 rte_acl_find_existing(const char *name)
43 {
44         struct rte_acl_ctx *ctx;
45         struct rte_acl_list *acl_list;
46
47         /* check that we have an initialised tail queue */
48         acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
49         if (acl_list == NULL) {
50                 rte_errno = E_RTE_NO_TAILQ;
51                 return NULL;
52         }
53
54         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
55         TAILQ_FOREACH(ctx, acl_list, next) {
56                 if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
57                         break;
58         }
59         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
60
61         if (ctx == NULL)
62                 rte_errno = ENOENT;
63         return ctx;
64 }
65
66 void
67 rte_acl_free(struct rte_acl_ctx *ctx)
68 {
69         if (ctx == NULL)
70                 return;
71
72         RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_ACL, rte_acl_list, ctx);
73
74         rte_free(ctx->mem);
75         rte_free(ctx);
76 }
77
78 struct rte_acl_ctx *
79 rte_acl_create(const struct rte_acl_param *param)
80 {
81         size_t sz;
82         struct rte_acl_ctx *ctx;
83         struct rte_acl_list *acl_list;
84         char name[sizeof(ctx->name)];
85
86         /* check that we have an initialised tail queue */
87         acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
88         if (acl_list == NULL) {
89                 rte_errno = E_RTE_NO_TAILQ;
90                 return NULL;
91         }
92
93         /* check that input parameters are valid. */
94         if (param == NULL || param->name == NULL) {
95                 rte_errno = EINVAL;
96                 return NULL;
97         }
98
99         rte_snprintf(name, sizeof(name), "ACL_%s", param->name);
100
101         /* calculate amount of memory required for pattern set. */
102         sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;
103
104         /* get EAL TAILQ lock. */
105         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
106
107         /* if we already have one with that name */
108         TAILQ_FOREACH(ctx, acl_list, next) {
109                 if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
110                         break;
111         }
112
113         /* if ACL with such name doesn't exist, then create a new one. */
114         if (ctx == NULL && (ctx = rte_zmalloc_socket(name, sz, CACHE_LINE_SIZE,
115                         param->socket_id)) != NULL) {
116
117                 /* init new allocated context. */
118                 ctx->rules = ctx + 1;
119                 ctx->max_rules = param->max_rule_num;
120                 ctx->rule_sz = param->rule_size;
121                 ctx->socket_id = param->socket_id;
122                 rte_snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);
123
124                 TAILQ_INSERT_TAIL(acl_list, ctx, next);
125
126         } else if (ctx == NULL) {
127                 RTE_LOG(ERR, ACL,
128                         "allocation of %zu bytes on socket %d for %s failed\n",
129                         sz, param->socket_id, name);
130         }
131
132         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
133         return ctx;
134 }
135
136 static int
137 acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, uint32_t num)
138 {
139         uint8_t *pos;
140
141         if (num + ctx->num_rules > ctx->max_rules)
142                 return -ENOMEM;
143
144         pos = ctx->rules;
145         pos += ctx->rule_sz * ctx->num_rules;
146         memcpy(pos, rules, num * ctx->rule_sz);
147         ctx->num_rules += num;
148
149         return 0;
150 }
151
152 static int
153 acl_check_rule(const struct rte_acl_rule_data *rd)
154 {
155         if ((rd->category_mask & LEN2MASK(RTE_ACL_MAX_CATEGORIES)) == 0 ||
156                         rd->priority > RTE_ACL_MAX_PRIORITY ||
157                         rd->priority < RTE_ACL_MIN_PRIORITY ||
158                         rd->userdata == RTE_ACL_INVALID_USERDATA)
159                 return -EINVAL;
160         return 0;
161 }
162
163 int
164 rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
165         uint32_t num)
166 {
167         const struct rte_acl_rule *rv;
168         uint32_t i;
169         int32_t rc;
170
171         if (ctx == NULL || rules == NULL || 0 == ctx->rule_sz)
172                 return -EINVAL;
173
174         for (i = 0; i != num; i++) {
175                 rv = (const struct rte_acl_rule *)
176                         ((uintptr_t)rules + i * ctx->rule_sz);
177                 rc = acl_check_rule(&rv->data);
178                 if (rc != 0) {
179                         RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
180                                 __func__, ctx->name, i + 1);
181                         return rc;
182                 }
183         }
184
185         return acl_add_rules(ctx, rules, num);
186 }
187
188 /*
189  * Reset all rules.
190  * Note that RT structures are not affected.
191  */
192 void
193 rte_acl_reset_rules(struct rte_acl_ctx *ctx)
194 {
195         if (ctx != NULL)
196                 ctx->num_rules = 0;
197 }
198
199 /*
200  * Reset all rules and destroys RT structures.
201  */
202 void
203 rte_acl_reset(struct rte_acl_ctx *ctx)
204 {
205         if (ctx != NULL) {
206                 rte_acl_reset_rules(ctx);
207                 rte_acl_build(ctx, &ctx->config);
208         }
209 }
210
211 /*
212  * Dump ACL context to the stdout.
213  */
214 void
215 rte_acl_dump(const struct rte_acl_ctx *ctx)
216 {
217         if (!ctx)
218                 return;
219         printf("acl context <%s>@%p\n", ctx->name, ctx);
220         printf("  max_rules=%"PRIu32"\n", ctx->max_rules);
221         printf("  rule_size=%"PRIu32"\n", ctx->rule_sz);
222         printf("  num_rules=%"PRIu32"\n", ctx->num_rules);
223         printf("  num_categories=%"PRIu32"\n", ctx->num_categories);
224         printf("  num_tries=%"PRIu32"\n", ctx->num_tries);
225 }
226
227 /*
228  * Dump all ACL contexts to the stdout.
229  */
230 void
231 rte_acl_list_dump(void)
232 {
233         struct rte_acl_ctx *ctx;
234         struct rte_acl_list *acl_list;
235
236         /* check that we have an initialised tail queue */
237         acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
238         if (acl_list == NULL) {
239                 rte_errno = E_RTE_NO_TAILQ;
240                 return;
241         }
242
243         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
244         TAILQ_FOREACH(ctx, acl_list, next) {
245                 rte_acl_dump(ctx);
246         }
247         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
248 }
249
/*
 * Support for legacy ipv4vlan rules.
 */

/* Internal rule type: rte_acl_rule with RTE_ACL_IPV4VLAN_NUM_FIELDS
 * field entries, the converted form of rte_acl_ipv4vlan_rule. */
RTE_ACL_RULE_DEF(acl_ipv4vlan_rule, RTE_ACL_IPV4VLAN_NUM_FIELDS);
255
256 static int
257 acl_ipv4vlan_check_rule(const struct rte_acl_ipv4vlan_rule *rule)
258 {
259         if (rule->src_port_low > rule->src_port_high ||
260                         rule->dst_port_low > rule->dst_port_high ||
261                         rule->src_mask_len > BIT_SIZEOF(rule->src_addr) ||
262                         rule->dst_mask_len > BIT_SIZEOF(rule->dst_addr))
263                 return -EINVAL;
264
265         return acl_check_rule(&rule->data);
266 }
267
268 static void
269 acl_ipv4vlan_convert_rule(const struct rte_acl_ipv4vlan_rule *ri,
270         struct acl_ipv4vlan_rule *ro)
271 {
272         ro->data = ri->data;
273
274         ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].value.u8 = ri->proto;
275         ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].value.u16 = ri->vlan;
276         ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].value.u16 = ri->domain;
277         ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = ri->src_addr;
278         ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = ri->dst_addr;
279         ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].value.u16 = ri->src_port_low;
280         ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].value.u16 = ri->dst_port_low;
281
282         ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].mask_range.u8 = ri->proto_mask;
283         ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].mask_range.u16 = ri->vlan_mask;
284         ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].mask_range.u16 =
285                 ri->domain_mask;
286         ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
287                 ri->src_mask_len;
288         ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = ri->dst_mask_len;
289         ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].mask_range.u16 =
290                 ri->src_port_high;
291         ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].mask_range.u16 =
292                 ri->dst_port_high;
293 }
294
295 int
296 rte_acl_ipv4vlan_add_rules(struct rte_acl_ctx *ctx,
297         const struct rte_acl_ipv4vlan_rule *rules,
298         uint32_t num)
299 {
300         int32_t rc;
301         uint32_t i;
302         struct acl_ipv4vlan_rule rv;
303
304         if (ctx == NULL || rules == NULL || ctx->rule_sz != sizeof(rv))
305                 return -EINVAL;
306
307         /* check input rules. */
308         for (i = 0; i != num; i++) {
309                 rc = acl_ipv4vlan_check_rule(rules + i);
310                 if (rc != 0) {
311                         RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
312                                 __func__, ctx->name, i + 1);
313                         return rc;
314                 }
315         }
316
317         if (num + ctx->num_rules > ctx->max_rules)
318                 return -ENOMEM;
319
320         /* perform conversion to the internal format and add to the context. */
321         for (i = 0, rc = 0; i != num && rc == 0; i++) {
322                 acl_ipv4vlan_convert_rule(rules + i, &rv);
323                 rc = acl_add_rules(ctx, &rv, 1);
324         }
325
326         return rc;
327 }
328
/*
 * Fill a generic rte_acl_config for the fixed legacy ipv4vlan rule
 * format: 7 fields (proto, two vlan tags, src/dst address, src/dst
 * ports). "layout" supplies the byte offset of each input group within
 * the search key; "num_categories" is copied through to the config.
 */
static void
acl_ipv4vlan_config(struct rte_acl_config *cfg,
	const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
	uint32_t num_categories)
{
	/* Field definitions: type, size and indices are fixed for the
	 * legacy format; only the offsets depend on the caller's layout
	 * and are patched in below. */
	static const struct rte_acl_field_def
		ipv4_defs[RTE_ACL_IPV4VLAN_NUM_FIELDS] = {
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint8_t),
			.field_index = RTE_ACL_IPV4VLAN_PROTO_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PROTO,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_VLAN1_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_VLAN,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_VLAN2_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_VLAN,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_MASK,
			.size = sizeof(uint32_t),
			.field_index = RTE_ACL_IPV4VLAN_SRC_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_SRC,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_MASK,
			.size = sizeof(uint32_t),
			.field_index = RTE_ACL_IPV4VLAN_DST_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_DST,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_RANGE,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_SRCP_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PORTS,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_RANGE,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_DSTP_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PORTS,
		},
	};

	memcpy(&cfg->defs, ipv4_defs, sizeof(ipv4_defs));
	cfg->num_fields = RTE_DIM(ipv4_defs);

	/* Patch in the offsets from the caller-provided layout. The two
	 * VLAN fields and the two port fields each share one input group:
	 * the second field starts right after the first. */
	cfg->defs[RTE_ACL_IPV4VLAN_PROTO_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PROTO];
	cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_VLAN];
	cfg->defs[RTE_ACL_IPV4VLAN_VLAN2_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_VLAN] +
		cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].size;
	cfg->defs[RTE_ACL_IPV4VLAN_SRC_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_SRC];
	cfg->defs[RTE_ACL_IPV4VLAN_DST_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_DST];
	cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PORTS];
	cfg->defs[RTE_ACL_IPV4VLAN_DSTP_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PORTS] +
		cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].size;

	cfg->num_categories = num_categories;
}
402
403 int
404 rte_acl_ipv4vlan_build(struct rte_acl_ctx *ctx,
405         const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
406         uint32_t num_categories)
407 {
408         struct rte_acl_config cfg;
409
410         if (ctx == NULL || layout == NULL)
411                 return -EINVAL;
412
413         acl_ipv4vlan_config(&cfg, layout, num_categories);
414         return rte_acl_build(ctx, &cfg);
415 }