ethdev: remove DUP action from flow API
lib/librte_ether/rte_flow.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, 0),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};

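/*
 * Helper to convert a driver return value into a flow API error: when ret is
 * non-zero and the port has been removed (hot-unplugged), override the error
 * with EIO; otherwise pass ret through unchanged.
 */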
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, rte_strerror(EIO));
        return ret;
}

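/*
 * Generic flow ops are not part of eth_dev_ops: drivers expose them through
 * the filter_ctrl callback when queried with RTE_ETH_FILTER_GENERIC and
 * RTE_ETH_FILTER_GET, which is what the lookup below relies on.
 */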
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return flow_err(port_id, ops->validate(dev, attr, pattern,
                                                       actions, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_flow *flow;
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
                flow = ops->create(dev, attr, pattern, actions, error);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
        }
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}

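/*
 * Illustrative usage sketch (not part of this file): validate a simple
 * ingress rule matching any Ethernet frame and steering it to queue 0,
 * then create it. Structure definitions come from rte_flow.h.
 *
 *         struct rte_flow_attr attr = { .ingress = 1 };
 *         struct rte_flow_item pattern[] = {
 *                 { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                 { .type = RTE_FLOW_ITEM_TYPE_END },
 *         };
 *         struct rte_flow_action_queue queue = { .index = 0 };
 *         struct rte_flow_action actions[] = {
 *                 { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                 { .type = RTE_FLOW_ACTION_TYPE_END },
 *         };
 *         struct rte_flow_error err;
 *         struct rte_flow *flow = NULL;
 *
 *         if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *                 flow = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 */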
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return flow_err(port_id, ops->destroy(dev, flow, error),
                                error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return flow_err(port_id, ops->flush(dev, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               enum rte_flow_action_type action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return flow_err(port_id, ops->query(dev, flow, action, data,
                                                    error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return flow_err(port_id, ops->isolate(dev, set, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

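/*
 * Note that rte_flow_error_set() stores the error code in rte_errno and
 * returns its negative value, which is why the wrappers above can simply
 * "return rte_flow_error_set(...)".
 */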
/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message)
{
        if (error) {
                *error = (struct rte_flow_error){
                        .type = type,
                        .cause = cause,
                        .message = message,
                };
        }
        rte_errno = code;
        return -code;
}

/** Pattern item specification types. */
enum item_spec_type {
        ITEM_SPEC,
        ITEM_LAST,
        ITEM_MASK,
};

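/*
 * Both copy helpers below return sizes rounded up to sizeof(double) so that
 * objects packed back to back into an rte_flow_desc buffer stay suitably
 * aligned.
 */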
/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
                    enum item_spec_type type)
{
        size_t size = 0;
        const void *item_spec =
                type == ITEM_SPEC ? item->spec :
                type == ITEM_LAST ? item->last :
                type == ITEM_MASK ? item->mask :
                NULL;

        if (!item_spec)
                goto empty;
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } src;
                union {
                        struct rte_flow_item_raw *raw;
                } dst;

        case RTE_FLOW_ITEM_TYPE_RAW:
                src.raw = item_spec;
                dst.raw = buf;
                size = offsetof(struct rte_flow_item_raw, pattern) +
                        src.raw->length * sizeof(*src.raw->pattern);
                if (dst.raw)
                        memcpy(dst.raw, src.raw, size);
                break;
        default:
                size = rte_flow_desc_item[item->type].size;
                if (buf)
                        memcpy(buf, item_spec, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}

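/*
 * The RSS action is the only one with a variable-length configuration: its
 * copy is laid out as the rte_flow_action_rss structure followed by its
 * queue[] array, a private copy of rte_eth_rss_conf and, when present, the
 * RSS key, the latter two aligned on sizeof(double).
 */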
/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
        size_t size = 0;

        if (!action->conf)
                goto empty;
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } src;
                union {
                        struct rte_flow_action_rss *rss;
                } dst;
                size_t off;

        case RTE_FLOW_ACTION_TYPE_RSS:
                src.rss = action->conf;
                dst.rss = buf;
                off = 0;
                if (dst.rss)
                        *dst.rss = (struct rte_flow_action_rss){
                                .num = src.rss->num,
                        };
                off += offsetof(struct rte_flow_action_rss, queue);
                if (src.rss->num) {
                        size = sizeof(*src.rss->queue) * src.rss->num;
                        if (dst.rss)
                                memcpy(dst.rss->queue, src.rss->queue, size);
                        off += size;
                }
                off = RTE_ALIGN_CEIL(off, sizeof(double));
                if (dst.rss) {
                        dst.rss->rss_conf = (void *)((uintptr_t)dst.rss + off);
                        *(struct rte_eth_rss_conf *)(uintptr_t)
                                dst.rss->rss_conf = (struct rte_eth_rss_conf){
                                .rss_key_len = src.rss->rss_conf->rss_key_len,
                                .rss_hf = src.rss->rss_conf->rss_hf,
                        };
                }
                off += sizeof(*src.rss->rss_conf);
                if (src.rss->rss_conf->rss_key_len) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->rss_conf->rss_key) *
                                src.rss->rss_conf->rss_key_len;
                        if (dst.rss) {
                                ((struct rte_eth_rss_conf *)(uintptr_t)
                                 dst.rss->rss_conf)->rss_key =
                                        (void *)((uintptr_t)dst.rss + off);
                                memcpy(dst.rss->rss_conf->rss_key,
                                       src.rss->rss_conf->rss_key,
                                       size);
                        }
                        off += size;
                }
                size = off;
                break;
        default:
                size = rte_flow_desc_action[action->type].size;
                if (buf)
                        memcpy(buf, action->conf, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}

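/*
 * rte_flow_copy() runs the code below twice: a first pass with fd == NULL
 * only accumulates the space required (items/actions at off1, their
 * spec/last/mask/conf data at off2); if the total fits in len and desc is
 * non-NULL, the offsets are rebased and the code jumps back to the "store"
 * label to perform the actual copy.
 *
 * A caller can therefore size the buffer first (illustrative sketch only):
 *
 *         size_t size = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *         struct rte_flow_desc *desc = malloc(size);
 *
 *         if (desc != NULL)
 *                 rte_flow_copy(desc, size, &attr, pattern, actions);
 */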
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        struct rte_flow_desc *fd = NULL;
        size_t tmp;
        size_t off1 = 0;
        size_t off2 = 0;
        size_t size = 0;

store:
        if (items) {
                const struct rte_flow_item *item;

                item = items;
                if (fd)
                        fd->items = (void *)&fd->data[off1];
                do {
                        struct rte_flow_item *dst = NULL;

                        if ((size_t)item->type >=
                                RTE_DIM(rte_flow_desc_item) ||
                            !rte_flow_desc_item[item->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, item,
                                             sizeof(*item));
                        off1 += sizeof(*item);
                        if (item->spec) {
                                if (fd)
                                        dst->spec = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_SPEC);
                        }
                        if (item->last) {
                                if (fd)
                                        dst->last = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_LAST);
                        }
                        if (item->mask) {
                                if (fd)
                                        dst->mask = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_MASK);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
                off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        }
        if (actions) {
                const struct rte_flow_action *action;

                action = actions;
                if (fd)
                        fd->actions = (void *)&fd->data[off1];
                do {
                        struct rte_flow_action *dst = NULL;

                        if ((size_t)action->type >=
                                RTE_DIM(rte_flow_desc_action) ||
                            !rte_flow_desc_action[action->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, action,
                                             sizeof(*action));
                        off1 += sizeof(*action);
                        if (action->conf) {
                                if (fd)
                                        dst->conf = fd->data + off2;
                                off2 += flow_action_conf_copy
                                        (fd ? fd->data + off2 : NULL, action);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
        }
        if (fd != NULL)
                return size;
        off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
                             sizeof(double));
        size = tmp + off1 + off2;
        if (size > len)
                return size;
        fd = desc;
        if (fd != NULL) {
                *fd = (const struct rte_flow_desc) {
                        .size = size,
                        .attr = *attr,
                };
                tmp -= offsetof(struct rte_flow_desc, data);
                off2 = tmp + off1;
                off1 = tmp;
                goto store;
        }
        return 0;
}