ethdev: add port ID item and action to flow API
lib/librte_ether/rte_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
};

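/** Override a failure with -EIO when the port has been physically removed. */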
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

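/*
 * Illustrative sketch, not part of the library: applications commonly call
 * rte_flow_validate() before rte_flow_create() so that unsupported rules are
 * rejected without touching device state. The single-ETH-item pattern and the
 * QUEUE action used here are hypothetical examples.
 */
static struct rte_flow *
example_flow_create_checked(uint16_t port_id, uint16_t queue_index,
			    struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match every Ethernet frame received on the port. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Steer matching traffic to a single Rx queue. */
	struct rte_flow_action_queue queue = { .index = queue_index };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
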
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       enum rte_flow_action_type action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

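/*
 * Illustrative sketch, not part of the library: reading the hit counter of a
 * rule that was created with a COUNT action. Counter support and the exact
 * fields reported depend on the underlying PMD.
 */
static int
example_flow_read_count(uint16_t port_id, struct rte_flow *flow,
			uint64_t *hits, struct rte_flow_error *error)
{
	struct rte_flow_query_count count = { .reset = 0 };
	int ret;

	ret = rte_flow_query(port_id, flow, RTE_FLOW_ACTION_TYPE_COUNT,
			     &count, error);
	if (ret)
		return ret;
	*hits = count.hits_set ? count.hits : 0;
	return 0;
}
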
/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

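/*
 * Illustrative sketch, not part of the library: a PMD callback would normally
 * report failures through rte_flow_error_set(), pointing "cause" at the
 * offending object so applications can locate it. The egress check below is a
 * hypothetical restriction.
 */
static int
example_pmd_check_attr(const struct rte_flow_attr *attr,
		       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "egress is not supported");
	return 0;
}
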
/** Pattern item specification types. */
enum item_spec_type {
	ITEM_SPEC,
	ITEM_LAST,
	ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
		    enum item_spec_type type)
{
	size_t size = 0;
	const void *item_spec =
		type == ITEM_SPEC ? item->spec :
		type == ITEM_LAST ? item->last :
		type == ITEM_MASK ? item->mask :
		NULL;

	if (!item_spec)
		goto empty;
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t off;

	case RTE_FLOW_ITEM_TYPE_RAW:
		src.raw = item_spec;
		dst.raw = buf;
		off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
				     sizeof(*src.raw->pattern));
		size = off + src.raw->length * sizeof(*src.raw->pattern);
		if (dst.raw) {
			memcpy(dst.raw, src.raw, sizeof(*src.raw));
			dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
						  src.raw->pattern,
						  size - off);
		}
		break;
	default:
		size = rte_flow_desc_item[item->type].size;
		if (buf)
			memcpy(buf, item_spec, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
	size_t size = 0;

	if (!action->conf)
		goto empty;
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t off;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		off = 0;
		if (dst.rss)
			*dst.rss = (struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			};
		off += sizeof(*src.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->key) * src.rss->key_len;
			if (dst.rss)
				dst.rss->key = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, size);
			off += size;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (dst.rss)
				dst.rss->queue = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, size);
			off += size;
		}
		size = off;
		break;
	default:
		size = rte_flow_desc_action[action->type].size;
		if (buf)
			memcpy(buf, action->conf, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	struct rte_flow_desc *fd = NULL;
	size_t tmp;
	size_t off1 = 0;
	size_t off2 = 0;
	size_t size = 0;

store:
	if (items) {
		const struct rte_flow_item *item;

		item = items;
		if (fd)
			fd->items = (void *)&fd->data[off1];
		do {
			struct rte_flow_item *dst = NULL;

			if ((size_t)item->type >=
				RTE_DIM(rte_flow_desc_item) ||
			    !rte_flow_desc_item[item->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, item,
					     sizeof(*item));
			off1 += sizeof(*item);
			if (item->spec) {
				if (fd)
					dst->spec = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_SPEC);
			}
			if (item->last) {
				if (fd)
					dst->last = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_LAST);
			}
			if (item->mask) {
				if (fd)
					dst->mask = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_MASK);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
		off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	}
	if (actions) {
		const struct rte_flow_action *action;

		action = actions;
		if (fd)
			fd->actions = (void *)&fd->data[off1];
		do {
			struct rte_flow_action *dst = NULL;

			if ((size_t)action->type >=
				RTE_DIM(rte_flow_desc_action) ||
			    !rte_flow_desc_action[action->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, action,
					     sizeof(*action));
			off1 += sizeof(*action);
			if (action->conf) {
				if (fd)
					dst->conf = fd->data + off2;
				off2 += flow_action_conf_copy
					(fd ? fd->data + off2 : NULL, action);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	}
	if (fd != NULL)
		return size;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
			     sizeof(double));
	size = tmp + off1 + off2;
	if (size > len)
		return size;
	fd = desc;
	if (fd != NULL) {
		*fd = (const struct rte_flow_desc) {
			.size = size,
			.attr = *attr,
		};
		tmp -= offsetof(struct rte_flow_desc, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
	return 0;
}
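
/*
 * Illustrative sketch, not part of the library: rte_flow_copy() is typically
 * called twice, first with an empty buffer to learn the required size, then
 * again on a buffer large enough for the whole description. The helper below
 * assumes <stdlib.h> is available for malloc()/free().
 */
static struct rte_flow_desc *
example_flow_desc_dup(const struct rte_flow_attr *attr,
		      const struct rte_flow_item *items,
		      const struct rte_flow_action *actions)
{
	struct rte_flow_desc *desc;
	size_t size = rte_flow_copy(NULL, 0, attr, items, actions);

	if (!size)
		return NULL;
	desc = malloc(size);
	if (!desc)
		return NULL;
	if (rte_flow_copy(desc, size, attr, items, actions) != size) {
		free(desc);
		return NULL;
	}
	return desc;
}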