ethdev: add flow API item/action name conversion
lib/librte_ethdev/rte_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
        MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
        MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
        MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
        MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
        MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
        MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
        MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
        MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
        MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
                     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
        MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
                     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};
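
/*
 * For reference, an entry such as MK_FLOW_ITEM(ETH, ...) above expands to a
 * designated initializer indexed by the item type, with the stringified
 * token as its name:
 *
 *   [RTE_FLOW_ITEM_TYPE_ETH] = {
 *           .name = "ETH",
 *           .size = sizeof(struct rte_flow_item_eth),
 *   }
 */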

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
        MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
        MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
        MK_FLOW_ACTION(OF_SET_MPLS_TTL,
                       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
        MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
        MK_FLOW_ACTION(OF_SET_NW_TTL,
                       sizeof(struct rte_flow_action_of_set_nw_ttl)),
        MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
        MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
        MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
        MK_FLOW_ACTION(OF_POP_VLAN, 0),
        MK_FLOW_ACTION(OF_PUSH_VLAN,
                       sizeof(struct rte_flow_action_of_push_vlan)),
        MK_FLOW_ACTION(OF_SET_VLAN_VID,
                       sizeof(struct rte_flow_action_of_set_vlan_vid)),
        MK_FLOW_ACTION(OF_SET_VLAN_PCP,
                       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
        MK_FLOW_ACTION(OF_POP_MPLS,
                       sizeof(struct rte_flow_action_of_pop_mpls)),
        MK_FLOW_ACTION(OF_PUSH_MPLS,
                       sizeof(struct rte_flow_action_of_push_mpls)),
};

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, rte_strerror(EIO));
        return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return flow_err(port_id, ops->validate(dev, attr, pattern,
                                                       actions, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_flow *flow;
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
                flow = ops->create(dev, attr, pattern, actions, error);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
        }
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}
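
/*
 * Illustrative sketch, not part of the library: how an application might
 * validate and create a minimal rule steering all Ethernet traffic to Rx
 * queue 0. The queue index and error handling policy are assumptions made
 * for the example only.
 */
static __rte_unused struct rte_flow *
example_create_eth_to_queue(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        /* Validation is optional but reports unsupported rules early. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &error))
                return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, &error);
}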

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return flow_err(port_id, ops->destroy(dev, flow, error),
                                error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return flow_err(port_id, ops->flush(dev, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               const struct rte_flow_action *action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return flow_err(port_id, ops->query(dev, flow, action, data,
                                                    error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
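
/*
 * Illustrative sketch, not part of the library: reading the statistics of a
 * rule that was created with a COUNT action. The action passed to
 * rte_flow_query() selects which action of the rule to query; whether the
 * underlying PMD supports counters is an assumption here.
 */
static __rte_unused int
example_query_count(uint16_t port_id, struct rte_flow *flow,
                    struct rte_flow_query_count *count)
{
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error error;

        return rte_flow_query(port_id, flow, &action, count, &error);
}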

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return flow_err(port_id, ops->isolate(dev, set, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message)
{
        if (error) {
                *error = (struct rte_flow_error){
                        .type = type,
                        .cause = cause,
                        .message = message,
                };
        }
        rte_errno = code;
        return -code;
}
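
/*
 * Illustrative sketch, not part of the library: how a driver callback might
 * use rte_flow_error_set() to reject an unsupported attribute while filling
 * in verbose error details. The specific limitation being reported is an
 * assumption for the example.
 */
static __rte_unused int
example_reject_egress(const struct rte_flow_attr *attr,
                      struct rte_flow_error *error)
{
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          attr, "egress is not supported");
        return 0;
}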

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
        RTE_FLOW_CONV_ITEM_SPEC,
        RTE_FLOW_CONV_ITEM_LAST,
        RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
                        const struct rte_flow_item *item,
                        enum rte_flow_conv_item_spec_type type)
{
        size_t off;
        const void *data =
                type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
                type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
                type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
                NULL;

        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } spec;
                union {
                        const struct rte_flow_item_raw *raw;
                } last;
                union {
                        const struct rte_flow_item_raw *raw;
                } mask;
                union {
                        const struct rte_flow_item_raw *raw;
                } src;
                union {
                        struct rte_flow_item_raw *raw;
                } dst;
                size_t tmp;

        case RTE_FLOW_ITEM_TYPE_RAW:
                spec.raw = item->spec;
                last.raw = item->last ? item->last : item->spec;
                mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
                src.raw = data;
                dst.raw = buf;
                rte_memcpy(dst.raw,
                           (&(struct rte_flow_item_raw){
                                .relative = src.raw->relative,
                                .search = src.raw->search,
                                .reserved = src.raw->reserved,
                                .offset = src.raw->offset,
                                .limit = src.raw->limit,
                                .length = src.raw->length,
                           }),
                           size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
                off = sizeof(*dst.raw);
                if (type == RTE_FLOW_CONV_ITEM_SPEC ||
                    (type == RTE_FLOW_CONV_ITEM_MASK &&
                     ((spec.raw->length & mask.raw->length) >=
                      (last.raw->length & mask.raw->length))))
                        tmp = spec.raw->length & mask.raw->length;
                else
                        tmp = last.raw->length & mask.raw->length;
                if (tmp) {
                        off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
                        if (size >= off + tmp)
                                dst.raw->pattern = rte_memcpy
                                        ((void *)((uintptr_t)dst.raw + off),
                                         src.raw->pattern, tmp);
                        off += tmp;
                }
                break;
        default:
                off = rte_flow_desc_item[item->type].size;
                rte_memcpy(buf, data, (size > off ? off : size));
                break;
        }
        return off;
}
/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
                          const struct rte_flow_action *action)
{
        size_t off;

        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } src;
                union {
                        struct rte_flow_action_rss *rss;
                } dst;
                size_t tmp;

        case RTE_FLOW_ACTION_TYPE_RSS:
                src.rss = action->conf;
                dst.rss = buf;
                rte_memcpy(dst.rss,
                           (&(struct rte_flow_action_rss){
                                .func = src.rss->func,
                                .level = src.rss->level,
                                .types = src.rss->types,
                                .key_len = src.rss->key_len,
                                .queue_num = src.rss->queue_num,
                           }),
                           size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
                off = sizeof(*dst.rss);
                if (src.rss->key_len) {
                        off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
                        tmp = sizeof(*src.rss->key) * src.rss->key_len;
                        if (size >= off + tmp)
                                dst.rss->key = rte_memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->key, tmp);
                        off += tmp;
                }
                if (src.rss->queue_num) {
                        off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
                        tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
                        if (size >= off + tmp)
                                dst.rss->queue = rte_memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->queue, tmp);
                        off += tmp;
                }
                break;
        default:
                off = rte_flow_desc_action[action->type].size;
                rte_memcpy(buf, action->conf, (size > off ? off : size));
                break;
        }
        return off;
}
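
/*
 * For illustration, converting an RSS action whose configuration carries a
 * key and a queue list produces a single contiguous destination buffer laid
 * out roughly as follows (sizes come from the source configuration, padding
 * from the element alignment above):
 *
 *   +-----------------------------+
 *   | struct rte_flow_action_rss  |  .key/.queue point into this buffer
 *   +-----------------------------+  when @p size is large enough
 *   | key bytes (key_len)         |  aligned to sizeof(*key)
 *   +-----------------------------+
 *   | queue ids (queue_num)       |  aligned to sizeof(*queue)
 *   +-----------------------------+
 *
 * The RAW pattern item above is handled the same way for its variable-length
 * pattern bytes.
 */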

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
                      const size_t size,
                      const struct rte_flow_item *src,
                      unsigned int num,
                      struct rte_flow_error *error)
{
        uintptr_t data = (uintptr_t)dst;
        size_t off;
        size_t ret;
        unsigned int i;

        for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
                if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
                    !rte_flow_desc_item[src->type].name)
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
                                 "cannot convert unknown item type");
                if (size >= off + sizeof(*dst))
                        *dst = (struct rte_flow_item){
                                .type = src->type,
                        };
                off += sizeof(*dst);
                if (!src->type)
                        num = i + 1;
        }
        num = i;
        src -= num;
        dst -= num;
        do {
                if (src->spec) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        ret = rte_flow_conv_item_spec
                                ((void *)(data + off),
                                 size > off ? size - off : 0, src,
                                 RTE_FLOW_CONV_ITEM_SPEC);
                        if (size && size >= off + ret)
                                dst->spec = (void *)(data + off);
                        off += ret;
                }
                if (src->last) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        ret = rte_flow_conv_item_spec
                                ((void *)(data + off),
                                 size > off ? size - off : 0, src,
                                 RTE_FLOW_CONV_ITEM_LAST);
                        if (size && size >= off + ret)
                                dst->last = (void *)(data + off);
                        off += ret;
                }
                if (src->mask) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        ret = rte_flow_conv_item_spec
                                ((void *)(data + off),
                                 size > off ? size - off : 0, src,
                                 RTE_FLOW_CONV_ITEM_MASK);
                        if (size && size >= off + ret)
                                dst->mask = (void *)(data + off);
                        off += ret;
                }
                ++src;
                ++dst;
        } while (--num);
        return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
                      const size_t size,
                      const struct rte_flow_action *src,
                      unsigned int num,
                      struct rte_flow_error *error)
{
        uintptr_t data = (uintptr_t)dst;
        size_t off;
        size_t ret;
        unsigned int i;

        for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
                if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
                    !rte_flow_desc_action[src->type].name)
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                                 src, "cannot convert unknown action type");
                if (size >= off + sizeof(*dst))
                        *dst = (struct rte_flow_action){
                                .type = src->type,
                        };
                off += sizeof(*dst);
                if (!src->type)
                        num = i + 1;
        }
        num = i;
        src -= num;
        dst -= num;
        do {
                if (src->conf) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        ret = rte_flow_conv_action_conf
                                ((void *)(data + off),
                                 size > off ? size - off : 0, src);
                        if (size && size >= off + ret)
                                dst->conf = (void *)(data + off);
                        off += ret;
                }
                ++src;
                ++dst;
        } while (--num);
        return off;
}
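
/*
 * Illustrative sketch, not part of the library: deep-copying a caller-owned
 * action list through the public rte_flow_conv() entry point (declared in
 * rte_flow.h and defined below) using RTE_FLOW_CONV_OP_ACTIONS. The caller
 * provides the destination buffer; its size is an assumption here.
 */
static __rte_unused int
example_dup_actions(struct rte_flow_action *dst, size_t len,
                    const struct rte_flow_action *src)
{
        struct rte_flow_error error;
        /* First pass with a zero-sized buffer only measures. */
        int need = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, src,
                                 &error);

        if (need < 0 || (size_t)need > len)
                return need; /* Error or destination too small. */
        return rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, dst, len, src, &error);
}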

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
                   const size_t size,
                   const struct rte_flow_conv_rule *src,
                   struct rte_flow_error *error)
{
        size_t off;
        int ret;

        rte_memcpy(dst,
                   (&(struct rte_flow_conv_rule){
                        .attr = NULL,
                        .pattern = NULL,
                        .actions = NULL,
                   }),
                   size > sizeof(*dst) ? sizeof(*dst) : size);
        off = sizeof(*dst);
        if (src->attr_ro) {
                off = RTE_ALIGN_CEIL(off, sizeof(double));
                if (size && size >= off + sizeof(*dst->attr))
                        dst->attr = rte_memcpy
                                ((void *)((uintptr_t)dst + off),
                                 src->attr_ro, sizeof(*dst->attr));
                off += sizeof(*dst->attr);
        }
        if (src->pattern_ro) {
                off = RTE_ALIGN_CEIL(off, sizeof(double));
                ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
                                            size > off ? size - off : 0,
                                            src->pattern_ro, 0, error);
                if (ret < 0)
                        return ret;
                if (size && size >= off + (size_t)ret)
                        dst->pattern = (void *)((uintptr_t)dst + off);
                off += ret;
        }
        if (src->actions_ro) {
                off = RTE_ALIGN_CEIL(off, sizeof(double));
                ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
                                            size > off ? size - off : 0,
                                            src->actions_ro, 0, error);
                if (ret < 0)
                        return ret;
                if (size >= off + (size_t)ret)
                        dst->actions = (void *)((uintptr_t)dst + off);
                off += ret;
        }
        return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
                   int is_ptr,
                   char *dst,
                   const size_t size,
                   const void *src,
                   struct rte_flow_error *error)
{
        struct desc_info {
                const struct rte_flow_desc_data *data;
                size_t num;
        };
        static const struct desc_info info_rep[2] = {
                { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
                { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
        };
        const struct desc_info *const info = &info_rep[!!is_action];
        unsigned int type = (uintptr_t)src;

        if (type >= info->num)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                         "unknown object type to retrieve the name of");
        if (!is_ptr)
                return strlcpy(dst, info->data[type].name, size);
        if (size >= sizeof(const char **))
                *((const char **)dst) = info->data[type].name;
        return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
              void *dst,
              size_t size,
              const void *src,
              struct rte_flow_error *error)
{
        switch (op) {
                const struct rte_flow_attr *attr;

        case RTE_FLOW_CONV_OP_NONE:
                return 0;
        case RTE_FLOW_CONV_OP_ATTR:
                attr = src;
                if (size > sizeof(*attr))
                        size = sizeof(*attr);
                rte_memcpy(dst, attr, size);
                return sizeof(*attr);
        case RTE_FLOW_CONV_OP_ITEM:
                return rte_flow_conv_pattern(dst, size, src, 1, error);
        case RTE_FLOW_CONV_OP_ACTION:
                return rte_flow_conv_actions(dst, size, src, 1, error);
        case RTE_FLOW_CONV_OP_PATTERN:
                return rte_flow_conv_pattern(dst, size, src, 0, error);
        case RTE_FLOW_CONV_OP_ACTIONS:
                return rte_flow_conv_actions(dst, size, src, 0, error);
        case RTE_FLOW_CONV_OP_RULE:
                return rte_flow_conv_rule(dst, size, src, error);
        case RTE_FLOW_CONV_OP_ITEM_NAME:
                return rte_flow_conv_name(0, 0, dst, size, src, error);
        case RTE_FLOW_CONV_OP_ACTION_NAME:
                return rte_flow_conv_name(1, 0, dst, size, src, error);
        case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
                return rte_flow_conv_name(0, 1, dst, size, src, error);
        case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
                return rte_flow_conv_name(1, 1, dst, size, src, error);
        }
        return rte_flow_error_set
                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                 "unknown object conversion operation");
}
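
/*
 * Illustrative sketch, not part of the library: retrieving the name of a
 * pattern item type through rte_flow_conv(). The type is passed as the
 * "src" pointer; as with the other operations, the return value is the
 * number of bytes needed regardless of the buffer size chosen by the caller.
 */
static __rte_unused int
example_item_type_name(enum rte_flow_item_type type, char *name, size_t len)
{
        return rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME, name, len,
                             (void *)(uintptr_t)type, NULL);
}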

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        /*
         * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
         * order to convert the former to the latter without wasting space.
         */
        struct rte_flow_conv_rule *dst =
                len ?
                (void *)((uintptr_t)desc +
                         (offsetof(struct rte_flow_desc, actions) -
                          offsetof(struct rte_flow_conv_rule, actions))) :
                NULL;
        size_t dst_size =
                len > sizeof(*desc) - sizeof(*dst) ?
                len - (sizeof(*desc) - sizeof(*dst)) :
                0;
        struct rte_flow_conv_rule src = {
                .attr_ro = NULL,
                .pattern_ro = items,
                .actions_ro = actions,
        };
        int ret;

        RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
                         sizeof(struct rte_flow_conv_rule));
        if (dst_size &&
            (&dst->pattern != &desc->items ||
             &dst->actions != &desc->actions ||
             (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
                rte_errno = EINVAL;
                return 0;
        }
        ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
        if (ret < 0)
                return 0;
        ret += sizeof(*desc) - sizeof(*dst);
        rte_memcpy(desc,
                   (&(struct rte_flow_desc){
                        .size = ret,
                        .attr = *attr,
                        .items = dst_size ? dst->pattern : NULL,
                        .actions = dst_size ? dst->actions : NULL,
                   }),
                   len > sizeof(*desc) ? sizeof(*desc) : len);
        return ret;
}
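
/*
 * Illustrative sketch, not part of the library: the usual two-pass pattern
 * with rte_flow_copy(). The first call measures the space required, the
 * caller provides a buffer at least that large, and the second call performs
 * the actual copy. Buffer allocation is left to the caller and is an
 * assumption of this example.
 */
static __rte_unused size_t
example_flow_desc_dup(struct rte_flow_desc *desc, size_t len,
                      const struct rte_flow_attr *attr,
                      const struct rte_flow_item *items,
                      const struct rte_flow_action *actions)
{
        size_t need = rte_flow_copy(NULL, 0, attr, items, actions);

        if (!need || need > len)
                return need; /* Caller must supply at least this much room. */
        return rte_flow_copy(desc, len, attr, items, actions);
}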

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
int __rte_experimental
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
                    const struct rte_flow_item *pattern, uint64_t types,
                    const struct rte_flow_expand_node graph[],
                    int graph_root_index)
{
        const int elt_n = 8;
        const struct rte_flow_item *item;
        const struct rte_flow_expand_node *node = &graph[graph_root_index];
        const int *next_node;
        const int *stack[elt_n];
        int stack_pos = 0;
        struct rte_flow_item flow_items[elt_n];
        unsigned int i;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;

        lsize = offsetof(struct rte_flow_expand_rss, entry) +
                elt_n * sizeof(buf->entry[0]);
        if (lsize <= size) {
                buf->entry[0].priority = 0;
                buf->entry[0].pattern = (void *)&buf->entry[elt_n];
                buf->entries = 0;
                addr = buf->entry[0].pattern;
        }
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                const struct rte_flow_expand_node *next = NULL;

                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == item->type)
                                break;
                }
                if (next)
                        node = next;
                user_pattern_size += sizeof(*item);
        }
        user_pattern_size += sizeof(*item); /* Handle END item. */
        lsize += user_pattern_size;
        /* Copy the user pattern in the first entry of the buffer. */
        if (lsize <= size) {
                rte_memcpy(addr, pattern, user_pattern_size);
                addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                buf->entries = 1;
        }
        /* Start expanding. */
        memset(flow_items, 0, sizeof(flow_items));
        user_pattern_size -= sizeof(*item);
        next_node = node->next;
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
                flow_items[stack_pos].type = node->type;
                if (node->rss_types & types) {
                        /*
                         * Compute the number of items to copy from the
                         * expansion and copy them.
                         * When stack_pos is 0, the expansion holds one
                         * element, plus the additional END item.
                         */
                        int elt = stack_pos + 2;

                        flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
                        lsize += elt * sizeof(*item) + user_pattern_size;
                        if (lsize <= size) {
                                size_t n = elt * sizeof(*item);

                                buf->entry[buf->entries].priority =
                                        stack_pos + 1;
                                buf->entry[buf->entries].pattern = addr;
                                buf->entries++;
                                rte_memcpy(addr, buf->entry[0].pattern,
                                           user_pattern_size);
                                addr = (void *)(((uintptr_t)addr) +
                                                user_pattern_size);
                                rte_memcpy(addr, flow_items, n);
                                addr = (void *)(((uintptr_t)addr) + n);
                        }
                }
                /* Go deeper. */
                if (node->next) {
                        next_node = node->next;
                        if (stack_pos++ == elt_n) {
                                rte_errno = E2BIG;
                                return -rte_errno;
                        }
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
                        ++next_node;
                } else {
                        /* Move to the next path. */
                        if (stack_pos)
                                next_node = stack[--stack_pos];
                        next_node++;
                        stack[stack_pos] = next_node;
                }
                node = *next_node ? &graph[*next_node] : NULL;
        }
        return lsize;
}