/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, 0),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
        MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
};

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, rte_strerror(EIO));
        return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}
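
/*
 * Illustrative sketch, not part of the library: roughly how a PMD exposes its
 * rte_flow_ops through the filter_ctrl callback that rte_flow_ops_get()
 * invokes above. The names example_flow_ops, example_filter_ctrl and the
 * RTE_FLOW_EXAMPLE_SKETCHES guard are hypothetical; the block is compiled out
 * by default.
 */
#ifdef RTE_FLOW_EXAMPLE_SKETCHES
static const struct rte_flow_ops example_flow_ops = {
        /* A real PMD points these members at its own callbacks. */
        .validate = NULL,
        .create = NULL,
        .destroy = NULL,
        .flush = NULL,
};

static int
example_filter_ctrl(struct rte_eth_dev *dev,
                    enum rte_filter_type filter_type,
                    enum rte_filter_op filter_op,
                    void *arg)
{
        RTE_SET_USED(dev);
        if (filter_type != RTE_ETH_FILTER_GENERIC)
                return -ENOTSUP;
        if (filter_op != RTE_ETH_FILTER_GET)
                return -EINVAL;
        /* rte_flow_ops_get() passes &ops as arg and expects it to be set. */
        *(const struct rte_flow_ops **)arg = &example_flow_ops;
        return 0;
}
#endif /* RTE_FLOW_EXAMPLE_SKETCHES */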

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return flow_err(port_id, ops->validate(dev, attr, pattern,
                                                       actions, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
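
/*
 * Illustrative usage sketch, not part of the library: validating a simple
 * rule that would steer ingress IPv4/UDP traffic to Rx queue 1. Items with
 * NULL spec/mask match any value at that protocol layer. The function name
 * and the RTE_FLOW_EXAMPLE_SKETCHES guard are hypothetical; compiled out by
 * default.
 */
#ifdef RTE_FLOW_EXAMPLE_SKETCHES
static int
example_validate_udp_to_queue(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* 0 means the PMD could create this rule right now. */
        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
#endif /* RTE_FLOW_EXAMPLE_SKETCHES */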

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_flow *flow;
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
                flow = ops->create(dev, attr, pattern, actions, error);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
        }
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}
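
/*
 * Illustrative usage sketch, not part of the library: creating a rule and
 * tearing it down later, with attr/pattern/actions built exactly as in the
 * validation sketch above. Hypothetical names and guard; compiled out by
 * default.
 */
#ifdef RTE_FLOW_EXAMPLE_SKETCHES
static int
example_create_then_destroy(uint16_t port_id,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[])
{
        struct rte_flow_error error;
        struct rte_flow *flow;

        flow = rte_flow_create(port_id, attr, pattern, actions, &error);
        if (flow == NULL) {
                /* rte_errno and error.message describe the failure. */
                return -rte_errno;
        }
        /* ... traffic is steered until the rule is destroyed ... */
        return rte_flow_destroy(port_id, flow, &error);
}
#endif /* RTE_FLOW_EXAMPLE_SKETCHES */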

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return flow_err(port_id, ops->destroy(dev, flow, error),
                                error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return flow_err(port_id, ops->flush(dev, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               enum rte_flow_action_type action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return flow_err(port_id, ops->query(dev, flow, action, data,
                                                    error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
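
/*
 * Illustrative usage sketch, not part of the library: reading the counters of
 * a rule that was created with a COUNT action. Support for COUNT depends on
 * the PMD. Hypothetical name and guard; compiled out by default.
 */
#ifdef RTE_FLOW_EXAMPLE_SKETCHES
static int
example_query_count(uint16_t port_id, struct rte_flow *flow,
                    uint64_t *hits, uint64_t *bytes)
{
        struct rte_flow_query_count count = { .reset = 0 };
        struct rte_flow_error error;
        int ret;

        ret = rte_flow_query(port_id, flow, RTE_FLOW_ACTION_TYPE_COUNT,
                             &count, &error);
        if (ret)
                return ret;
        *hits = count.hits_set ? count.hits : 0;
        *bytes = count.bytes_set ? count.bytes : 0;
        return 0;
}
#endif /* RTE_FLOW_EXAMPLE_SKETCHES */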

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return flow_err(port_id, ops->isolate(dev, set, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
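
/*
 * Illustrative usage sketch, not part of the library: entering isolated mode
 * so that only traffic matching explicit flow rules reaches the application.
 * The API documentation recommends doing this early, ideally before the first
 * call to rte_eth_dev_configure(). Hypothetical name and guard; compiled out
 * by default.
 */
#ifdef RTE_FLOW_EXAMPLE_SKETCHES
static int
example_enter_isolated_mode(uint16_t port_id)
{
        struct rte_flow_error error;

        /* A non-zero "set" enters isolated mode, zero leaves it. */
        return rte_flow_isolate(port_id, 1, &error);
}
#endif /* RTE_FLOW_EXAMPLE_SKETCHES */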

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message)
{
        if (error) {
                *error = (struct rte_flow_error){
                        .type = type,
                        .cause = cause,
                        .message = message,
                };
        }
        rte_errno = code;
        return -code;
}
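
/*
 * Illustrative usage sketch, not part of the library: how a caller typically
 * consumes an rte_flow_error filled in through rte_flow_error_set(). The
 * convention throughout this API is a negative errno return value, rte_errno
 * set to the positive error code, and error->message/cause optionally set by
 * the PMD. Hypothetical name and guard; compiled out by default.
 */
#ifdef RTE_FLOW_EXAMPLE_SKETCHES
static const char *
example_error_message(int ret, const struct rte_flow_error *error)
{
        if (ret == 0)
                return "success";
        /* Prefer the PMD-provided message, fall back to rte_errno. */
        if (error->message != NULL)
                return error->message;
        return rte_strerror(rte_errno);
}
#endif /* RTE_FLOW_EXAMPLE_SKETCHES */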

/** Pattern item specification types. */
enum item_spec_type {
        ITEM_SPEC,
        ITEM_LAST,
        ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
                    enum item_spec_type type)
{
        size_t size = 0;
        const void *item_spec =
                type == ITEM_SPEC ? item->spec :
                type == ITEM_LAST ? item->last :
                type == ITEM_MASK ? item->mask :
                NULL;

        if (!item_spec)
                goto empty;
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } src;
                union {
                        struct rte_flow_item_raw *raw;
                } dst;
                size_t off;

        case RTE_FLOW_ITEM_TYPE_RAW:
                src.raw = item_spec;
                dst.raw = buf;
                off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
                                     sizeof(*src.raw->pattern));
                size = off + src.raw->length * sizeof(*src.raw->pattern);
                if (dst.raw) {
                        memcpy(dst.raw, src.raw, sizeof(*src.raw));
                        dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
                                                  src.raw->pattern,
                                                  size - off);
                }
                break;
        default:
                size = rte_flow_desc_item[item->type].size;
                if (buf)
                        memcpy(buf, item_spec, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
        size_t size = 0;

        if (!action->conf)
                goto empty;
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } src;
                union {
                        struct rte_flow_action_rss *rss;
                } dst;
                size_t off;

        case RTE_FLOW_ACTION_TYPE_RSS:
                src.rss = action->conf;
                dst.rss = buf;
                off = 0;
                if (dst.rss)
                        *dst.rss = (struct rte_flow_action_rss){
                                .func = src.rss->func,
                                .level = src.rss->level,
                                .types = src.rss->types,
                                .key_len = src.rss->key_len,
                                .queue_num = src.rss->queue_num,
                        };
                off += sizeof(*src.rss);
                if (src.rss->key_len) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->key) * src.rss->key_len;
                        if (dst.rss)
                                dst.rss->key = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->key, size);
                        off += size;
                }
                if (src.rss->queue_num) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->queue) * src.rss->queue_num;
                        if (dst.rss)
                                dst.rss->queue = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->queue, size);
                        off += size;
                }
                size = off;
                break;
        default:
                size = rte_flow_desc_action[action->type].size;
                if (buf)
                        memcpy(buf, action->conf, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        struct rte_flow_desc *fd = NULL;
        size_t tmp;
        size_t off1 = 0;
        size_t off2 = 0;
        size_t size = 0;

store:
        if (items) {
                const struct rte_flow_item *item;

                item = items;
                if (fd)
                        fd->items = (void *)&fd->data[off1];
                do {
                        struct rte_flow_item *dst = NULL;

                        if ((size_t)item->type >=
                                RTE_DIM(rte_flow_desc_item) ||
                            !rte_flow_desc_item[item->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, item,
                                             sizeof(*item));
                        off1 += sizeof(*item);
                        if (item->spec) {
                                if (fd)
                                        dst->spec = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_SPEC);
                        }
                        if (item->last) {
                                if (fd)
                                        dst->last = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_LAST);
                        }
                        if (item->mask) {
                                if (fd)
                                        dst->mask = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_MASK);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
                off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        }
        if (actions) {
                const struct rte_flow_action *action;

                action = actions;
                if (fd)
                        fd->actions = (void *)&fd->data[off1];
                do {
                        struct rte_flow_action *dst = NULL;

                        if ((size_t)action->type >=
                                RTE_DIM(rte_flow_desc_action) ||
                            !rte_flow_desc_action[action->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, action,
                                             sizeof(*action));
                        off1 += sizeof(*action);
                        if (action->conf) {
                                if (fd)
                                        dst->conf = fd->data + off2;
                                off2 += flow_action_conf_copy
                                        (fd ? fd->data + off2 : NULL, action);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
        }
        if (fd != NULL)
                return size;
        off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
                             sizeof(double));
        size = tmp + off1 + off2;
        if (size > len)
                return size;
        fd = desc;
        if (fd != NULL) {
                *fd = (const struct rte_flow_desc) {
                        .size = size,
                        .attr = *attr,
                };
                tmp -= offsetof(struct rte_flow_desc, data);
                off2 = tmp + off1;
                off1 = tmp;
                goto store;
        }
        return 0;
}
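
/*
 * Illustrative usage sketch, not part of the library: rte_flow_copy() is
 * designed to be called twice, first with a too-small (or zero) length to
 * learn the required size, then with a buffer of at least that size to store
 * the self-contained description. Uses malloc()/free() from <stdlib.h>, which
 * this file does not otherwise include. Hypothetical name and guard; compiled
 * out by default.
 */
#ifdef RTE_FLOW_EXAMPLE_SKETCHES
#include <stdlib.h>

static struct rte_flow_desc *
example_save_rule(const struct rte_flow_attr *attr,
                  const struct rte_flow_item *items,
                  const struct rte_flow_action *actions)
{
        struct rte_flow_desc *desc;
        size_t size;

        /* First pass: no destination, returns the space needed (0 on error). */
        size = rte_flow_copy(NULL, 0, attr, items, actions);
        if (size == 0)
                return NULL;
        desc = malloc(size);
        if (desc == NULL)
                return NULL;
        /* Second pass: large enough buffer, performs the actual copy. */
        if (rte_flow_copy(desc, size, attr, items, actions) != size) {
                free(desc);
                return NULL;
        }
        return desc;
}
#endif /* RTE_FLOW_EXAMPLE_SKETCHES */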