flow_classify: fix ISO C in exported header
dpdk.git: lib/librte_ether/rte_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }
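
/*
 * For illustration only: an entry generated by the macro above, e.g.
 * MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), expands to the
 * designated initializer below, which is what allows the description
 * tables to be indexed directly by item (or action) type:
 *
 *   [RTE_FLOW_ITEM_TYPE_ETH] = {
 *           .name = "ETH",
 *           .size = sizeof(struct rte_flow_item_eth),
 *   }
 */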

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, 0),
        MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}
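
/*
 * Illustrative sketch, not part of this file: a PMD makes its flow
 * operations reachable through the filter_ctrl callback queried above by
 * writing the ops pointer into the provided argument. The names
 * foo_dev_filter_ctrl and foo_flow_ops are hypothetical.
 *
 *   static int
 *   foo_dev_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
 *                       enum rte_filter_type filter_type,
 *                       enum rte_filter_op filter_op,
 *                       void *arg)
 *   {
 *           if (filter_type != RTE_ETH_FILTER_GENERIC ||
 *               filter_op != RTE_ETH_FILTER_GET)
 *                   return -ENOTSUP;
 *           *(const void **)arg = &foo_flow_ops;
 *           return 0;
 *   }
 */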

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return ops->validate(dev, attr, pattern, actions, error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create))
                return ops->create(dev, attr, pattern, actions, error);
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}
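
/*
 * Illustrative usage sketch, not part of this file: applications normally
 * validate a rule before creating it. The port_id variable and the pattern
 * and action contents below are hypothetical.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error error;
 *   struct rte_flow *flow = NULL;
 *
 *   if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
 *           flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 */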

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return ops->destroy(dev, flow, error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return ops->flush(dev, error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               enum rte_flow_action_type action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return ops->query(dev, flow, action, data, error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return ops->isolate(dev, set, error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message)
{
        if (error) {
                *error = (struct rte_flow_error){
                        .type = type,
                        .cause = cause,
                        .message = message,
                };
        }
        rte_errno = code;
        return -code;
}
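
/*
 * Illustrative sketch, not part of this file: drivers are expected to
 * report failures through this helper so that rte_errno and the optional
 * error object stay consistent. The item variable below is hypothetical.
 *
 *   return rte_flow_error_set(error, ENOTSUP,
 *                             RTE_FLOW_ERROR_TYPE_ITEM,
 *                             item, "item not supported");
 */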

/** Compute storage space needed by item specification. */
static void
flow_item_spec_size(const struct rte_flow_item *item,
                    size_t *size, size_t *pad)
{
        if (!item->spec) {
                *size = 0;
                goto empty;
        }
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } spec;

        /* Not a fall-through. */
        case RTE_FLOW_ITEM_TYPE_RAW:
                spec.raw = item->spec;
                *size = offsetof(struct rte_flow_item_raw, pattern) +
                        spec.raw->length * sizeof(*spec.raw->pattern);
                break;
        default:
                *size = rte_flow_desc_item[item->type].size;
                break;
        }
empty:
        *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

/** Compute storage space needed by action configuration. */
static void
flow_action_conf_size(const struct rte_flow_action *action,
                      size_t *size, size_t *pad)
{
        if (!action->conf) {
                *size = 0;
                goto empty;
        }
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } conf;

        /* Not a fall-through. */
        case RTE_FLOW_ACTION_TYPE_RSS:
                conf.rss = action->conf;
                *size = offsetof(struct rte_flow_action_rss, queue) +
                        conf.rss->num * sizeof(*conf.rss->queue);
                break;
        default:
                *size = rte_flow_desc_action[action->type].size;
                break;
        }
empty:
        *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        struct rte_flow_desc *fd = NULL;
        size_t tmp;
        size_t pad;
        size_t off1 = 0;
        size_t off2 = 0;
        size_t size = 0;

        /*
         * Two-pass operation: the first pass (fd == NULL) only measures
         * off1 (item/action arrays) and off2 (spec/last/mask/conf data);
         * the second pass (fd == desc) performs the actual copy.
         */
store:
        if (items) {
                const struct rte_flow_item *item;

                item = items;
                if (fd)
                        fd->items = (void *)&fd->data[off1];
                do {
                        struct rte_flow_item *dst = NULL;

                        if ((size_t)item->type >=
                                RTE_DIM(rte_flow_desc_item) ||
                            !rte_flow_desc_item[item->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, item,
                                             sizeof(*item));
                        off1 += sizeof(*item);
                        flow_item_spec_size(item, &tmp, &pad);
                        if (item->spec) {
                                if (fd)
                                        dst->spec = memcpy(fd->data + off2,
                                                           item->spec, tmp);
                                off2 += tmp + pad;
                        }
                        if (item->last) {
                                if (fd)
                                        dst->last = memcpy(fd->data + off2,
                                                           item->last, tmp);
                                off2 += tmp + pad;
                        }
                        if (item->mask) {
                                if (fd)
                                        dst->mask = memcpy(fd->data + off2,
                                                           item->mask, tmp);
                                off2 += tmp + pad;
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
                off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        }
        if (actions) {
                const struct rte_flow_action *action;

                action = actions;
                if (fd)
                        fd->actions = (void *)&fd->data[off1];
                do {
                        struct rte_flow_action *dst = NULL;

                        if ((size_t)action->type >=
                                RTE_DIM(rte_flow_desc_action) ||
                            !rte_flow_desc_action[action->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, action,
                                             sizeof(*action));
                        off1 += sizeof(*action);
                        flow_action_conf_size(action, &tmp, &pad);
                        if (action->conf) {
                                if (fd)
                                        dst->conf = memcpy(fd->data + off2,
                                                           action->conf, tmp);
                                off2 += tmp + pad;
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
        }
        if (fd != NULL)
                return size;
        /* End of the first pass: compute the total size and check it fits. */
        off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
                             sizeof(double));
        size = tmp + off1 + off2;
        if (size > len)
                return size;
        /* Switch to the copy pass using the caller-provided buffer. */
        fd = desc;
        if (fd != NULL) {
                *fd = (const struct rte_flow_desc) {
                        .size = size,
                        .attr = *attr,
                };
                tmp -= offsetof(struct rte_flow_desc, data);
                off2 = tmp + off1;
                off1 = tmp;
                goto store;
        }
        return 0;
}
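
/*
 * Illustrative usage sketch, not part of this file: a first call with a
 * zero-length buffer only measures the required storage, a second call
 * performs the copy. Error handling is omitted.
 *
 *   size_t size = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *   struct rte_flow_desc *fd = malloc(size);
 *
 *   if (fd != NULL)
 *           rte_flow_copy(fd, size, &attr, pattern, actions);
 */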