ethdev: add encap level to RSS flow API action
lib/librte_ether/rte_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, 0),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, rte_strerror(EIO));
        return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}
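
/*
 * Drivers expose their rte_flow_ops table through the filter_ctrl callback
 * queried above. A minimal, hypothetical PMD-side sketch; the
 * example_flow_ops table, example_filter_ctrl name and callback names are
 * illustrative only, not part of this file:
 *
 * @code
 * static const struct rte_flow_ops example_flow_ops = {
 *         .validate = example_flow_validate,
 *         .create = example_flow_create,
 *         .destroy = example_flow_destroy,
 *         .flush = example_flow_flush,
 * };
 *
 * static int
 * example_filter_ctrl(struct rte_eth_dev *dev,
 *                     enum rte_filter_type filter_type,
 *                     enum rte_filter_op filter_op,
 *                     void *arg)
 * {
 *         RTE_SET_USED(dev);
 *         if (filter_type != RTE_ETH_FILTER_GENERIC)
 *                 return -ENOTSUP;
 *         if (filter_op != RTE_ETH_FILTER_GET)
 *                 return -EINVAL;
 *         *(const void **)arg = &example_flow_ops;
 *         return 0;
 * }
 * @endcode
 */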

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return flow_err(port_id, ops->validate(dev, attr, pattern,
                                                       actions, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_flow *flow;
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
                flow = ops->create(dev, attr, pattern, actions, error);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
        }
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}
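
/*
 * Application-side usage sketch: validate, then create a rule steering
 * ingress IPv4/UDP traffic to queue 1. Kept as a comment; variable names
 * and the surrounding error handling are illustrative only.
 *
 * @code
 * struct rte_flow_attr attr = { .ingress = 1 };
 * struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *         { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action_queue queue = { .index = 1 };
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow *flow = NULL;
 *
 * if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *         flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 * if (flow == NULL)
 *         printf("flow rule rejected: %s\n",
 *                err.message ? err.message : "(no message)");
 * @endcode
 */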

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return flow_err(port_id, ops->destroy(dev, flow, error),
                                error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return flow_err(port_id, ops->flush(dev, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               enum rte_flow_action_type action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return flow_err(port_id, ops->query(dev, flow, action, data,
                                                    error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
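
/*
 * Usage sketch: read back the counters attached to a rule through its
 * COUNT action. This assumes the rule was created with
 * RTE_FLOW_ACTION_TYPE_COUNT and that the PMD implements query(); kept as
 * a comment, with illustrative variable names.
 *
 * @code
 * struct rte_flow_query_count count = { .reset = 1 };
 * struct rte_flow_error err;
 *
 * if (rte_flow_query(port_id, flow, RTE_FLOW_ACTION_TYPE_COUNT,
 *                    &count, &err) == 0 && count.hits_set)
 *         printf("hits: %" PRIu64 "\n", count.hits);
 * @endcode
 */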

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return flow_err(port_id, ops->isolate(dev, set, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message)
{
        if (error) {
                *error = (struct rte_flow_error){
                        .type = type,
                        .cause = cause,
                        .message = message,
                };
        }
        rte_errno = code;
        return -code;
}
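
/*
 * Since rte_flow_error_set() both fills rte_errno and returns the negated
 * error code, PMD callbacks typically report and bail out in a single
 * statement. Hypothetical sketch of a validate() callback rejecting
 * egress rules (the example_flow_validate name is illustrative only):
 *
 * @code
 * static int
 * example_flow_validate(struct rte_eth_dev *dev,
 *                       const struct rte_flow_attr *attr,
 *                       const struct rte_flow_item pattern[],
 *                       const struct rte_flow_action actions[],
 *                       struct rte_flow_error *error)
 * {
 *         (void)dev;
 *         (void)pattern;
 *         (void)actions;
 *         if (attr->egress)
 *                 return rte_flow_error_set(error, ENOTSUP,
 *                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *                                           NULL,
 *                                           "egress rules not supported");
 *         return 0;
 * }
 * @endcode
 */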

/** Pattern item specification types. */
enum item_spec_type {
        ITEM_SPEC,
        ITEM_LAST,
        ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
                    enum item_spec_type type)
{
        size_t size = 0;
        const void *item_spec =
                type == ITEM_SPEC ? item->spec :
                type == ITEM_LAST ? item->last :
                type == ITEM_MASK ? item->mask :
                NULL;

        if (!item_spec)
                goto empty;
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } src;
                union {
                        struct rte_flow_item_raw *raw;
                } dst;
                size_t off;

        case RTE_FLOW_ITEM_TYPE_RAW:
                /* RAW carries an out-of-band pattern buffer: copy the
                 * structure, then append a deep copy of the pattern right
                 * after it. */
                src.raw = item_spec;
                dst.raw = buf;
                off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
                                     sizeof(*src.raw->pattern));
                size = off + src.raw->length * sizeof(*src.raw->pattern);
                if (dst.raw) {
                        memcpy(dst.raw, src.raw, sizeof(*src.raw));
                        dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
                                                  src.raw->pattern,
                                                  size - off);
                }
                break;
        default:
                size = rte_flow_desc_item[item->type].size;
                if (buf)
                        memcpy(buf, item_spec, size);
                break;
        }
empty:
        /* Pad so that the next object keeps sizeof(double) alignment. */
        return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
        size_t size = 0;

        if (!action->conf)
                goto empty;
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } src;
                union {
                        struct rte_flow_action_rss *rss;
                } dst;
                size_t off;

        case RTE_FLOW_ACTION_TYPE_RSS:
                /* RSS owns two out-of-band arrays (hash key and queue
                 * indices): copy the structure first, then append each
                 * array on a sizeof(double) boundary. */
                src.rss = action->conf;
                dst.rss = buf;
                off = 0;
                if (dst.rss)
                        *dst.rss = (struct rte_flow_action_rss){
                                .func = src.rss->func,
                                .level = src.rss->level,
                                .types = src.rss->types,
                                .key_len = src.rss->key_len,
                                .queue_num = src.rss->queue_num,
                        };
                off += sizeof(*src.rss);
                if (src.rss->key_len) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->key) * src.rss->key_len;
                        if (dst.rss)
                                dst.rss->key = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->key, size);
                        off += size;
                }
                if (src.rss->queue_num) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->queue) * src.rss->queue_num;
                        if (dst.rss)
                                dst.rss->queue = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->queue, size);
                        off += size;
                }
                size = off;
                break;
        default:
                size = rte_flow_desc_action[action->type].size;
                if (buf)
                        memcpy(buf, action->conf, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}
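
/*
 * Equivalent size computation for the RSS deep copy above, useful when
 * sizing a destination buffer by hand. This is a sketch mirroring the
 * logic of flow_action_conf_copy(), not a replacement for it; the
 * rss_conf_copy_size() name is illustrative only.
 *
 * @code
 * static size_t
 * rss_conf_copy_size(const struct rte_flow_action_rss *rss)
 * {
 *         size_t off = sizeof(*rss);
 *
 *         if (rss->key_len)
 *                 off = RTE_ALIGN_CEIL(off, sizeof(double)) + rss->key_len;
 *         if (rss->queue_num)
 *                 off = RTE_ALIGN_CEIL(off, sizeof(double)) +
 *                       rss->queue_num * sizeof(*rss->queue);
 *         return RTE_ALIGN_CEIL(off, sizeof(double));
 * }
 * @endcode
 */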

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        struct rte_flow_desc *fd = NULL;
        size_t tmp;
        size_t off1 = 0;
        size_t off2 = 0;
        size_t size = 0;

        /*
         * Two passes over the same code: the first one (fd == NULL) only
         * measures off1 (item/action arrays) and off2 (their
         * spec/last/mask/conf data); the second one writes into desc.
         */
store:
        if (items) {
                const struct rte_flow_item *item;

                item = items;
                if (fd)
                        fd->items = (void *)&fd->data[off1];
                do {
                        struct rte_flow_item *dst = NULL;

                        if ((size_t)item->type >=
                                RTE_DIM(rte_flow_desc_item) ||
                            !rte_flow_desc_item[item->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, item,
                                             sizeof(*item));
                        off1 += sizeof(*item);
                        if (item->spec) {
                                if (fd)
                                        dst->spec = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_SPEC);
                        }
                        if (item->last) {
                                if (fd)
                                        dst->last = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_LAST);
                        }
                        if (item->mask) {
                                if (fd)
                                        dst->mask = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_MASK);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
                off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        }
        if (actions) {
                const struct rte_flow_action *action;

                action = actions;
                if (fd)
                        fd->actions = (void *)&fd->data[off1];
                do {
                        struct rte_flow_action *dst = NULL;

                        if ((size_t)action->type >=
                                RTE_DIM(rte_flow_desc_action) ||
                            !rte_flow_desc_action[action->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, action,
                                             sizeof(*action));
                        off1 += sizeof(*action);
                        if (action->conf) {
                                if (fd)
                                        dst->conf = fd->data + off2;
                                off2 += flow_action_conf_copy
                                        (fd ? fd->data + off2 : NULL, action);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
        }
        if (fd != NULL)
                return size;
        off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
                             sizeof(double));
        size = tmp + off1 + off2;
        if (size > len)
                return size;
        fd = desc;
        if (fd != NULL) {
                *fd = (const struct rte_flow_desc) {
                        .size = size,
                        .attr = *attr,
                };
                /* Rebase offsets so the second pass places spec/conf data
                 * right after the item/action arrays inside fd->data. */
                tmp -= offsetof(struct rte_flow_desc, data);
                off2 = tmp + off1;
                off1 = tmp;
                goto store;
        }
        return 0;
}
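
/*
 * Usage sketch: rte_flow_copy() follows the usual two-call pattern. An
 * undersized buffer (including a NULL, zero-length one) only returns the
 * required size, so query first, allocate, then copy. Kept as a comment;
 * variable names and error handling are illustrative only.
 *
 * @code
 * struct rte_flow_desc *fdesc;
 * size_t size;
 *
 * size = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 * if (size == 0)
 *         return -rte_errno;
 * fdesc = malloc(size);
 * if (fdesc == NULL)
 *         return -ENOMEM;
 * rte_flow_copy(fdesc, size, &attr, pattern, actions);
 * @endcode
 */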