ethdev: add hash function to RSS flow API action
lib/librte_ether/rte_flow.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, 0),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, rte_strerror(EIO));
        return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}
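
For context, a minimal driver-side sketch of the handshake used above: rte_flow_ops_get() asks the PMD's filter_ctrl callback for RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET and expects a pointer to its rte_flow_ops in return. This is not part of the original file; everything named example_* is hypothetical and only illustrates that contract.

/* Hypothetical PMD-side counterpart of the query issued by
 * rte_flow_ops_get(); the actual callbacks would be filled in here. */
static const struct rte_flow_ops example_flow_ops;

static int
example_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
                    enum rte_filter_type filter_type,
                    enum rte_filter_op filter_op,
                    void *arg)
{
        if (filter_type != RTE_ETH_FILTER_GENERIC)
                return -ENOTSUP;
        if (filter_op != RTE_ETH_FILTER_GET)
                return -EINVAL;
        /* hand the generic flow operations back to rte_flow_ops_get() */
        *(const void **)arg = &example_flow_ops;
        return 0;
}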

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return flow_err(port_id, ops->validate(dev, attr, pattern,
                                                       actions, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_flow *flow;
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
                flow = ops->create(dev, attr, pattern, actions, error);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
        }
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}
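
As a caller-side sketch (not part of the original file), an application typically validates a rule before creating it. The helper below, with a hypothetical name and an arbitrary queue index, builds a minimal ETH/IPV4/UDP pattern and a single QUEUE action using the two functions defined above.

static struct rte_flow *
example_udp_to_queue(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* reject unsupported rules early, then hand them to the PMD */
        if (rte_flow_validate(port_id, &attr, pattern, actions, error))
                return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, error);
}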

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return flow_err(port_id, ops->destroy(dev, flow, error),
                                error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return flow_err(port_id, ops->flush(dev, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               enum rte_flow_action_type action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return flow_err(port_id, ops->query(dev, flow, action, data,
                                                    error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return flow_err(port_id, ops->isolate(dev, set, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message)
{
        if (error) {
                *error = (struct rte_flow_error){
                        .type = type,
                        .cause = cause,
                        .message = message,
                };
        }
        rte_errno = code;
        return -code;
}
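
A driver-side sketch (the helper name is hypothetical): PMD callbacks use rte_flow_error_set() to record a verbose error and set rte_errno in a single statement, pointing cause at the offending object.

static int
example_reject_item(const struct rte_flow_item *item,
                    struct rte_flow_error *error)
{
        /* returns -ENOTSUP and fills *error with the offending item */
        return rte_flow_error_set(error, ENOTSUP,
                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, "item type not supported");
}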

/** Pattern item specification types. */
enum item_spec_type {
        ITEM_SPEC,
        ITEM_LAST,
        ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
                    enum item_spec_type type)
{
        size_t size = 0;
        const void *item_spec =
                type == ITEM_SPEC ? item->spec :
                type == ITEM_LAST ? item->last :
                type == ITEM_MASK ? item->mask :
                NULL;

        if (!item_spec)
                goto empty;
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } src;
                union {
                        struct rte_flow_item_raw *raw;
                } dst;
                size_t off;

        case RTE_FLOW_ITEM_TYPE_RAW:
                src.raw = item_spec;
                dst.raw = buf;
                off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
                                     sizeof(*src.raw->pattern));
                size = off + src.raw->length * sizeof(*src.raw->pattern);
                if (dst.raw) {
                        memcpy(dst.raw, src.raw, sizeof(*src.raw));
                        dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
                                                  src.raw->pattern,
                                                  size - off);
                }
                break;
        default:
                size = rte_flow_desc_item[item->type].size;
                if (buf)
                        memcpy(buf, item_spec, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}
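
To illustrate why RAW items get special treatment above: their specification embeds a pointer to a variable-length byte pattern, which flow_item_spec_copy() relocates right after the copied structure. A hypothetical spec with such an out-of-line buffer (not part of the original file):

static const uint8_t example_raw_bytes[] = { 0xde, 0xad, 0xbe, 0xef };
static const struct rte_flow_item_raw example_raw_spec = {
        /* pattern lives outside the structure, hence the deep copy above */
        .length = RTE_DIM(example_raw_bytes),
        .pattern = example_raw_bytes,
};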

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
        size_t size = 0;

        if (!action->conf)
                goto empty;
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } src;
                union {
                        struct rte_flow_action_rss *rss;
                } dst;
                size_t off;

        case RTE_FLOW_ACTION_TYPE_RSS:
                src.rss = action->conf;
                dst.rss = buf;
                off = 0;
                if (dst.rss)
                        *dst.rss = (struct rte_flow_action_rss){
                                .func = src.rss->func,
                                .types = src.rss->types,
                                .key_len = src.rss->key_len,
                                .queue_num = src.rss->queue_num,
                        };
                off += sizeof(*src.rss);
                if (src.rss->key_len) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->key) * src.rss->key_len;
                        if (dst.rss)
                                dst.rss->key = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->key, size);
                        off += size;
                }
                if (src.rss->queue_num) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->queue) * src.rss->queue_num;
                        if (dst.rss)
                                dst.rss->queue = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->queue, size);
                        off += size;
                }
                size = off;
                break;
        default:
                size = rte_flow_desc_action[action->type].size;
                if (buf)
                        memcpy(buf, action->conf, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}
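
For reference, a hypothetical RSS action configuration of the shape deep-copied above. The .func member is the hash function selector this patch adds; .key and .queue are pointers, which is why the helper stores their contents out-of-line after the structure. Values below are arbitrary examples, not part of the original file.

static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_rss_conf = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* let the PMD choose */
        .types = ETH_RSS_IP,
        .queue_num = RTE_DIM(example_rss_queues),
        .queue = example_rss_queues,
        /* .key/.key_len left at zero: use the device's default RSS key */
};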

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        struct rte_flow_desc *fd = NULL;
        size_t tmp;
        size_t off1 = 0;
        size_t off2 = 0;
        size_t size = 0;

store:
        if (items) {
                const struct rte_flow_item *item;

                item = items;
                if (fd)
                        fd->items = (void *)&fd->data[off1];
                do {
                        struct rte_flow_item *dst = NULL;

                        if ((size_t)item->type >=
                                RTE_DIM(rte_flow_desc_item) ||
                            !rte_flow_desc_item[item->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, item,
                                             sizeof(*item));
                        off1 += sizeof(*item);
                        if (item->spec) {
                                if (fd)
                                        dst->spec = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_SPEC);
                        }
                        if (item->last) {
                                if (fd)
                                        dst->last = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_LAST);
                        }
                        if (item->mask) {
                                if (fd)
                                        dst->mask = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_MASK);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
                off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        }
        if (actions) {
                const struct rte_flow_action *action;

                action = actions;
                if (fd)
                        fd->actions = (void *)&fd->data[off1];
                do {
                        struct rte_flow_action *dst = NULL;

                        if ((size_t)action->type >=
                                RTE_DIM(rte_flow_desc_action) ||
                            !rte_flow_desc_action[action->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, action,
                                             sizeof(*action));
                        off1 += sizeof(*action);
                        if (action->conf) {
                                if (fd)
                                        dst->conf = fd->data + off2;
                                off2 += flow_action_conf_copy
                                        (fd ? fd->data + off2 : NULL, action);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
        }
        if (fd != NULL)
                return size;
        off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
                             sizeof(double));
        size = tmp + off1 + off2;
        if (size > len)
                return size;
        fd = desc;
        if (fd != NULL) {
                *fd = (const struct rte_flow_desc) {
                        .size = size,
                        .attr = *attr,
                };
                tmp -= offsetof(struct rte_flow_desc, data);
                off2 = tmp + off1;
                off1 = tmp;
                goto store;
        }
        return 0;
}
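
A caller-side sketch of the intended two-pass use of rte_flow_copy(): a first call with a zero-length buffer only returns the required size, a second call fills the allocated descriptor. The helper name is hypothetical and malloc()/free() (from <stdlib.h>, not included by this file) stand in for whatever allocator the caller uses.

static struct rte_flow_desc *
example_flow_desc_dup(const struct rte_flow_attr *attr,
                      const struct rte_flow_item *items,
                      const struct rte_flow_action *actions)
{
        struct rte_flow_desc *desc;
        /* first pass: measure; returns 0 on unsupported items/actions */
        size_t size = rte_flow_copy(NULL, 0, attr, items, actions);

        if (!size)
                return NULL;
        desc = malloc(size);
        if (!desc)
                return NULL;
        /* second pass: actually store the description */
        if (rte_flow_copy(desc, size, attr, items, actions) != size) {
                free(desc);
                return NULL;
        }
        return desc;
}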