ethdev: introduce new tunnel VXLAN-GPE
[dpdk.git] / lib / librte_ether / rte_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include "rte_ethdev.h"
15 #include "rte_flow_driver.h"
16 #include "rte_flow.h"
17
/**
 * Flow elements description tables.
 *
 * One entry describes a single pattern item or action type: its printable
 * name and the byte size of its specification/configuration structure.
 */
struct rte_flow_desc_data {
	const char *name; /**< Type name (NULL marks an unknown type). */
	size_t size; /**< Size of the associated spec/conf structure, 0 if none. */
};
25
/**
 * Generate flow_item[] entry.
 *
 * Designated initializer indexed by RTE_FLOW_ITEM_TYPE_<t>, so table order
 * does not matter and gaps default to zero (NULL name, size 0).
 */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
32
/**
 * Information about known flow pattern items.
 *
 * Indexed by RTE_FLOW_ITEM_TYPE_*; size is the byte size of the item's
 * spec/last/mask structure (0 for items that take no specification).
 * Used by flow_item_spec_copy()/rte_flow_copy() to size and copy item data.
 */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
};
60
/**
 * Generate flow_action[] entry.
 *
 * Designated initializer indexed by RTE_FLOW_ACTION_TYPE_<t>; gaps default
 * to zero (NULL name, size 0), which marks unknown action types.
 */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
67
/**
 * Information about known flow actions.
 *
 * Indexed by RTE_FLOW_ACTION_TYPE_*; size is the byte size of the action's
 * configuration structure (0 for actions without configuration). Used by
 * flow_action_conf_copy()/rte_flow_copy() to size and copy action data.
 */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
};
84
85 static int
86 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
87 {
88         if (ret == 0)
89                 return 0;
90         if (rte_eth_dev_is_removed(port_id))
91                 return rte_flow_error_set(error, EIO,
92                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
93                                           NULL, rte_strerror(EIO));
94         return ret;
95 }
96
97 /* Get generic flow operations structure from a port. */
98 const struct rte_flow_ops *
99 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
100 {
101         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
102         const struct rte_flow_ops *ops;
103         int code;
104
105         if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
106                 code = ENODEV;
107         else if (unlikely(!dev->dev_ops->filter_ctrl ||
108                           dev->dev_ops->filter_ctrl(dev,
109                                                     RTE_ETH_FILTER_GENERIC,
110                                                     RTE_ETH_FILTER_GET,
111                                                     &ops) ||
112                           !ops))
113                 code = ENOSYS;
114         else
115                 return ops;
116         rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
117                            NULL, rte_strerror(code));
118         return NULL;
119 }
120
121 /* Check whether a flow rule can be created on a given port. */
122 int
123 rte_flow_validate(uint16_t port_id,
124                   const struct rte_flow_attr *attr,
125                   const struct rte_flow_item pattern[],
126                   const struct rte_flow_action actions[],
127                   struct rte_flow_error *error)
128 {
129         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
130         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
131
132         if (unlikely(!ops))
133                 return -rte_errno;
134         if (likely(!!ops->validate))
135                 return flow_err(port_id, ops->validate(dev, attr, pattern,
136                                                        actions, error), error);
137         return rte_flow_error_set(error, ENOSYS,
138                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
139                                   NULL, rte_strerror(ENOSYS));
140 }
141
142 /* Create a flow rule on a given port. */
143 struct rte_flow *
144 rte_flow_create(uint16_t port_id,
145                 const struct rte_flow_attr *attr,
146                 const struct rte_flow_item pattern[],
147                 const struct rte_flow_action actions[],
148                 struct rte_flow_error *error)
149 {
150         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
151         struct rte_flow *flow;
152         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
153
154         if (unlikely(!ops))
155                 return NULL;
156         if (likely(!!ops->create)) {
157                 flow = ops->create(dev, attr, pattern, actions, error);
158                 if (flow == NULL)
159                         flow_err(port_id, -rte_errno, error);
160                 return flow;
161         }
162         rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
163                            NULL, rte_strerror(ENOSYS));
164         return NULL;
165 }
166
167 /* Destroy a flow rule on a given port. */
168 int
169 rte_flow_destroy(uint16_t port_id,
170                  struct rte_flow *flow,
171                  struct rte_flow_error *error)
172 {
173         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
174         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
175
176         if (unlikely(!ops))
177                 return -rte_errno;
178         if (likely(!!ops->destroy))
179                 return flow_err(port_id, ops->destroy(dev, flow, error),
180                                 error);
181         return rte_flow_error_set(error, ENOSYS,
182                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
183                                   NULL, rte_strerror(ENOSYS));
184 }
185
186 /* Destroy all flow rules associated with a port. */
187 int
188 rte_flow_flush(uint16_t port_id,
189                struct rte_flow_error *error)
190 {
191         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
192         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
193
194         if (unlikely(!ops))
195                 return -rte_errno;
196         if (likely(!!ops->flush))
197                 return flow_err(port_id, ops->flush(dev, error), error);
198         return rte_flow_error_set(error, ENOSYS,
199                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
200                                   NULL, rte_strerror(ENOSYS));
201 }
202
203 /* Query an existing flow rule. */
204 int
205 rte_flow_query(uint16_t port_id,
206                struct rte_flow *flow,
207                enum rte_flow_action_type action,
208                void *data,
209                struct rte_flow_error *error)
210 {
211         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
212         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
213
214         if (!ops)
215                 return -rte_errno;
216         if (likely(!!ops->query))
217                 return flow_err(port_id, ops->query(dev, flow, action, data,
218                                                     error), error);
219         return rte_flow_error_set(error, ENOSYS,
220                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
221                                   NULL, rte_strerror(ENOSYS));
222 }
223
224 /* Restrict ingress traffic to the defined flow rules. */
225 int
226 rte_flow_isolate(uint16_t port_id,
227                  int set,
228                  struct rte_flow_error *error)
229 {
230         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
231         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
232
233         if (!ops)
234                 return -rte_errno;
235         if (likely(!!ops->isolate))
236                 return flow_err(port_id, ops->isolate(dev, set, error), error);
237         return rte_flow_error_set(error, ENOSYS,
238                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239                                   NULL, rte_strerror(ENOSYS));
240 }
241
242 /* Initialize flow error structure. */
243 int
244 rte_flow_error_set(struct rte_flow_error *error,
245                    int code,
246                    enum rte_flow_error_type type,
247                    const void *cause,
248                    const char *message)
249 {
250         if (error) {
251                 *error = (struct rte_flow_error){
252                         .type = type,
253                         .cause = cause,
254                         .message = message,
255                 };
256         }
257         rte_errno = code;
258         return -code;
259 }
260
/**
 * Pattern item specification types.
 *
 * Selects which of the three rte_flow_item pointers (spec/last/mask)
 * flow_item_spec_copy() should copy.
 */
enum item_spec_type {
	ITEM_SPEC, /**< Copy item->spec. */
	ITEM_LAST, /**< Copy item->last. */
	ITEM_MASK, /**< Copy item->mask. */
};
267
/**
 * Compute storage space needed by item specification and copy it.
 *
 * Copies item->spec, item->last or item->mask (selected by @p type) into
 * @p buf. When @p buf is NULL only the required size is computed, so the
 * function can be used for a sizing pass first and a copy pass second.
 *
 * @return Number of bytes consumed, rounded up to a multiple of
 *   sizeof(double) to keep consecutive objects suitably aligned.
 */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
		    enum item_spec_type type)
{
	size_t size = 0;
	/* Select which of the three specification pointers to process. */
	const void *item_spec =
		type == ITEM_SPEC ? item->spec :
		type == ITEM_LAST ? item->last :
		type == ITEM_MASK ? item->mask :
		NULL;

	if (!item_spec)
		goto empty;
	switch (item->type) {
		/* Declarations scoped to the switch block; no statement
		 * executes before the first case label. */
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t off;

	case RTE_FLOW_ITEM_TYPE_RAW:
		/* RAW items carry an out-of-line pattern buffer: store the
		 * structure first, then the pattern bytes right after it,
		 * aligned on the pattern element size. */
		src.raw = item_spec;
		dst.raw = buf;
		off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
				     sizeof(*src.raw->pattern));
		size = off + src.raw->length * sizeof(*src.raw->pattern);
		if (dst.raw) {
			memcpy(dst.raw, src.raw, sizeof(*src.raw));
			/* Re-point the copy's pattern at its trailing data. */
			dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
						  src.raw->pattern,
						  size - off);
		}
		break;
	default:
		/* Fixed-size items: size comes from the lookup table. */
		size = rte_flow_desc_item[item->type].size;
		if (buf)
			memcpy(buf, item_spec, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
313
/**
 * Compute storage space needed by action configuration and copy it.
 *
 * Copies action->conf into @p buf. When @p buf is NULL only the required
 * size is computed (sizing pass). RSS actions are flattened: the structure
 * is stored first, followed by copies of the out-of-line key and queue
 * arrays, each aligned on sizeof(double).
 *
 * @return Number of bytes consumed, rounded up to a multiple of
 *   sizeof(double) to keep consecutive objects suitably aligned.
 */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
	size_t size = 0;

	if (!action->conf)
		goto empty;
	switch (action->type) {
		/* Declarations scoped to the switch block; no statement
		 * executes before the first case label. */
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t off;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		off = 0;
		/* Copy the scalar members; key/queue pointers are rebound
		 * below once their data has been placed. */
		if (dst.rss)
			*dst.rss = (struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			};
		off += sizeof(*src.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->key) * src.rss->key_len;
			if (dst.rss)
				dst.rss->key = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, size);
			off += size;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (dst.rss)
				dst.rss->queue = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, size);
			off += size;
		}
		/* Total footprint is the final running offset. */
		size = off;
		break;
	default:
		/* Fixed-size configurations: size from the lookup table. */
		size = rte_flow_desc_action[action->type].size;
		if (buf)
			memcpy(buf, action->conf, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
373
/**
 * Store a full rte_flow description.
 *
 * Two-pass algorithm sharing one code path via "goto store":
 * - Pass 1 (fd == NULL): walk items and actions to compute off1 (size of
 *   the item/action arrays) and off2 (size of their out-of-line
 *   spec/last/mask/conf data), then derive the total size.
 * - Pass 2 (fd != NULL, only when size <= len): initialize *desc and jump
 *   back to actually copy everything into fd->data, arrays first at off1,
 *   variable data after them at off2.
 *
 * Returns the total size needed (larger than @p len means nothing was
 * copied), 0 with rte_errno = ENOTSUP on an unknown item/action type, or
 * 0 when @p desc is NULL and the computed size fits in @p len.
 */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	struct rte_flow_desc *fd = NULL;
	size_t tmp;
	size_t off1 = 0;
	size_t off2 = 0;
	size_t size = 0;

store:
	if (items) {
		const struct rte_flow_item *item;

		item = items;
		if (fd)
			fd->items = (void *)&fd->data[off1];
		do {
			struct rte_flow_item *dst = NULL;

			/* Reject types absent from the descriptor table. */
			if ((size_t)item->type >=
				RTE_DIM(rte_flow_desc_item) ||
			    !rte_flow_desc_item[item->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, item,
					     sizeof(*item));
			off1 += sizeof(*item);
			/* Spec/last/mask data is appended at off2; the
			 * copied item's pointers are redirected to it. */
			if (item->spec) {
				if (fd)
					dst->spec = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_SPEC);
			}
			if (item->last) {
				if (fd)
					dst->last = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_LAST);
			}
			if (item->mask) {
				if (fd)
					dst->mask = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_MASK);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
		off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	}
	if (actions) {
		const struct rte_flow_action *action;

		action = actions;
		if (fd)
			fd->actions = (void *)&fd->data[off1];
		do {
			struct rte_flow_action *dst = NULL;

			/* Reject types absent from the descriptor table. */
			if ((size_t)action->type >=
				RTE_DIM(rte_flow_desc_action) ||
			    !rte_flow_desc_action[action->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, action,
					     sizeof(*action));
			off1 += sizeof(*action);
			if (action->conf) {
				if (fd)
					dst->conf = fd->data + off2;
				off2 += flow_action_conf_copy
					(fd ? fd->data + off2 : NULL, action);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	}
	/* Second pass ends here: everything has been copied. */
	if (fd != NULL)
		return size;
	/* First pass: turn the array/data footprints into a total size. */
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
			     sizeof(double));
	size = tmp + off1 + off2;
	if (size > len)
		return size;
	fd = desc;
	if (fd != NULL) {
		*fd = (const struct rte_flow_desc) {
			.size = size,
			.attr = *attr,
		};
		/* Rebase offsets relative to fd->data: arrays start at
		 * off1 (== alignment padding), variable data after them. */
		tmp -= offsetof(struct rte_flow_desc, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
	return 0;
}