app/testpmd: add GTP parsing and Tx checksum offload
[dpdk.git] / lib / librte_ethdev / rte_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_string_fns.h>
15 #include "rte_ethdev.h"
16 #include "rte_flow_driver.h"
17 #include "rte_flow.h"
18
19 /**
20  * Flow elements description tables.
21  */
struct rte_flow_desc_data {
	const char *name; /**< Element name (NULL marks an unknown type). */
	size_t size; /**< Size in bytes of the associated spec/conf structure. */
};
26
27 /** Generate flow_item[] entry. */
/** Generate flow_item[] entry. */
/* Designated initializer indexed by RTE_FLOW_ITEM_TYPE_<t>; "# t"
 * stringifies the type suffix for the .name field. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
33
34 /** Information about known flow pattern items. */
/** Information about known flow pattern items.
 *
 * Item types missing from this table get a NULL .name, which the
 * converter (rte_flow_conv_pattern) reports as an unknown item type.
 */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	/* GTP, GTPC and GTPU all use struct rte_flow_item_gtp. */
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	/* GRE_KEY is a bare 32-bit big-endian value, no item structure. */
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	/* PPPOES and PPPOED share struct rte_flow_item_pppoe. */
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
};
88
89 /** Generate flow_action[] entry. */
/** Generate flow_action[] entry. */
/* Designated initializer indexed by RTE_FLOW_ACTION_TYPE_<t>; "# t"
 * stringifies the type suffix for the .name field. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
95
96 /** Information about known flow actions. */
/** Information about known flow actions.
 *
 * Action types missing from this table get a NULL .name, which the
 * converter (rte_flow_conv_actions) reports as an unknown action type.
 */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	/* NVGRE_ENCAP reuses the VXLAN encap structure size; the layouts
	 * are asserted identical in rte_flow_conv_action_conf(). */
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	/* SRC/DST variants of the set actions share one conf structure. */
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	/* TCP seq/ack adjustments take a bare 32-bit big-endian value. */
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
};
161
162 static int
163 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
164 {
165         if (ret == 0)
166                 return 0;
167         if (rte_eth_dev_is_removed(port_id))
168                 return rte_flow_error_set(error, EIO,
169                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
170                                           NULL, rte_strerror(EIO));
171         return ret;
172 }
173
174 /* Get generic flow operations structure from a port. */
175 const struct rte_flow_ops *
176 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
177 {
178         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
179         const struct rte_flow_ops *ops;
180         int code;
181
182         if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
183                 code = ENODEV;
184         else if (unlikely(!dev->dev_ops->filter_ctrl ||
185                           dev->dev_ops->filter_ctrl(dev,
186                                                     RTE_ETH_FILTER_GENERIC,
187                                                     RTE_ETH_FILTER_GET,
188                                                     &ops) ||
189                           !ops))
190                 code = ENOSYS;
191         else
192                 return ops;
193         rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
194                            NULL, rte_strerror(code));
195         return NULL;
196 }
197
198 /* Check whether a flow rule can be created on a given port. */
199 int
200 rte_flow_validate(uint16_t port_id,
201                   const struct rte_flow_attr *attr,
202                   const struct rte_flow_item pattern[],
203                   const struct rte_flow_action actions[],
204                   struct rte_flow_error *error)
205 {
206         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
207         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
208
209         if (unlikely(!ops))
210                 return -rte_errno;
211         if (likely(!!ops->validate))
212                 return flow_err(port_id, ops->validate(dev, attr, pattern,
213                                                        actions, error), error);
214         return rte_flow_error_set(error, ENOSYS,
215                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
216                                   NULL, rte_strerror(ENOSYS));
217 }
218
219 /* Create a flow rule on a given port. */
220 struct rte_flow *
221 rte_flow_create(uint16_t port_id,
222                 const struct rte_flow_attr *attr,
223                 const struct rte_flow_item pattern[],
224                 const struct rte_flow_action actions[],
225                 struct rte_flow_error *error)
226 {
227         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
228         struct rte_flow *flow;
229         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
230
231         if (unlikely(!ops))
232                 return NULL;
233         if (likely(!!ops->create)) {
234                 flow = ops->create(dev, attr, pattern, actions, error);
235                 if (flow == NULL)
236                         flow_err(port_id, -rte_errno, error);
237                 return flow;
238         }
239         rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
240                            NULL, rte_strerror(ENOSYS));
241         return NULL;
242 }
243
244 /* Destroy a flow rule on a given port. */
245 int
246 rte_flow_destroy(uint16_t port_id,
247                  struct rte_flow *flow,
248                  struct rte_flow_error *error)
249 {
250         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
251         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
252
253         if (unlikely(!ops))
254                 return -rte_errno;
255         if (likely(!!ops->destroy))
256                 return flow_err(port_id, ops->destroy(dev, flow, error),
257                                 error);
258         return rte_flow_error_set(error, ENOSYS,
259                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
260                                   NULL, rte_strerror(ENOSYS));
261 }
262
263 /* Destroy all flow rules associated with a port. */
264 int
265 rte_flow_flush(uint16_t port_id,
266                struct rte_flow_error *error)
267 {
268         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
269         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
270
271         if (unlikely(!ops))
272                 return -rte_errno;
273         if (likely(!!ops->flush))
274                 return flow_err(port_id, ops->flush(dev, error), error);
275         return rte_flow_error_set(error, ENOSYS,
276                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
277                                   NULL, rte_strerror(ENOSYS));
278 }
279
280 /* Query an existing flow rule. */
281 int
282 rte_flow_query(uint16_t port_id,
283                struct rte_flow *flow,
284                const struct rte_flow_action *action,
285                void *data,
286                struct rte_flow_error *error)
287 {
288         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
289         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
290
291         if (!ops)
292                 return -rte_errno;
293         if (likely(!!ops->query))
294                 return flow_err(port_id, ops->query(dev, flow, action, data,
295                                                     error), error);
296         return rte_flow_error_set(error, ENOSYS,
297                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
298                                   NULL, rte_strerror(ENOSYS));
299 }
300
301 /* Restrict ingress traffic to the defined flow rules. */
302 int
303 rte_flow_isolate(uint16_t port_id,
304                  int set,
305                  struct rte_flow_error *error)
306 {
307         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
308         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
309
310         if (!ops)
311                 return -rte_errno;
312         if (likely(!!ops->isolate))
313                 return flow_err(port_id, ops->isolate(dev, set, error), error);
314         return rte_flow_error_set(error, ENOSYS,
315                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
316                                   NULL, rte_strerror(ENOSYS));
317 }
318
319 /* Initialize flow error structure. */
320 int
321 rte_flow_error_set(struct rte_flow_error *error,
322                    int code,
323                    enum rte_flow_error_type type,
324                    const void *cause,
325                    const char *message)
326 {
327         if (error) {
328                 *error = (struct rte_flow_error){
329                         .type = type,
330                         .cause = cause,
331                         .message = message,
332                 };
333         }
334         rte_errno = code;
335         return -code;
336 }
337
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC, /**< Copy item->spec. */
	RTE_FLOW_CONV_ITEM_LAST, /**< Copy item->last. */
	RTE_FLOW_CONV_ITEM_MASK, /**< Copy item->mask. */
};
344
/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	/* Select which of the three specification pointers to copy. */
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		/* Declarations placed before the first case label are
		 * shared by all cases of this switch. */
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		/* RAW items carry a variable-length pattern and need a
		 * deep copy: fixed header first, pattern data appended. */
		spec.raw = item->spec;
		/* Fall back to spec when last is absent, and to the
		 * default mask when mask is absent. */
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		/* Copy the fixed part, clearing the pattern pointer via
		 * the compound literal (truncated to size). */
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		/* Determine how many pattern bytes to copy: spec length
		 * for SPEC, the larger masked length for MASK, otherwise
		 * the (masked) last length. */
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			/* Only link the pattern when it fully fits. */
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		/* Flat items: a plain bounded copy of the known size. */
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}
432
/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		/* Declarations placed before the first case label are
		 * shared by all cases of this switch. */
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		/* RSS needs a deep copy: fixed part first, then the key
		 * and queue arrays appended after it. */
		src.rss = action->conf;
		dst.rss = buf;
		/* Copy the fixed part, clearing key/queue pointers via
		 * the compound literal (truncated to size). */
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			/* Only link the key when it fully fits. */
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			/* Only link the queue list when it fully fits. */
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		/* Both encap variants are handled through the VXLAN
		 * structure; the build-time assertion below guarantees
		 * the two layouts are interchangeable. */
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			/* Recursively copy the embedded pattern list. */
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			/* Only link the definition when it fully fits. */
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		/* Flat configurations: a plain bounded copy. */
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}
535
/**
 * Copy a list of pattern items.
 *
 * Works in two passes: the first writes the item array itself (types
 * only, spec/last/mask left NULL) and finds the list length; the second
 * appends each non-NULL spec/last/mask after the array and links the
 * pointers when the destination is large enough.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p buf contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* First pass: validate types and copy the bare item array. */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		/* RTE_FLOW_ITEM_TYPE_END is 0: stop after this entry. */
		if (!src->type)
			num = i + 1;
	}
	/* Rewind to the start of both lists for the second pass. */
	num = i;
	src -= num;
	dst -= num;
	/* Second pass: append spec/last/mask data after the array. */
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			/* Only link the copy when it fully fits. */
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;

		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}
624
/**
 * Copy a list of actions.
 *
 * Works in two passes like rte_flow_conv_pattern(): the first writes
 * the action array itself (types only, conf left NULL) and finds the
 * list length; the second appends each non-NULL configuration after
 * the array and links the pointers when the destination is large
 * enough.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p buf contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* First pass: validate types and copy the bare action array. */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		/* RTE_FLOW_ACTION_TYPE_END is 0: stop after this entry. */
		if (!src->type)
			num = i + 1;
	}
	/* Rewind to the start of both lists for the second pass. */
	num = i;
	src -= num;
	dst -= num;
	/* Second pass: append configurations after the array. */
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			/* Only link the copy when it fully fits. */
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}
691
692 /**
693  * Copy flow rule components.
694  *
695  * This comprises the flow rule descriptor itself, attributes, pattern and
696  * actions list. NULL components in @p src are skipped.
697  *
698  * @param[out] dst
699  *   Destination buffer. Can be NULL if @p size is zero.
700  * @param size
701  *   Size of @p dst in bytes.
702  * @param[in] src
703  *   Source flow rule descriptor.
704  * @param[out] error
705  *   Perform verbose error reporting if not NULL.
706  *
707  * @return
708  *   A positive value representing the number of bytes needed to store all
709  *   components including the descriptor regardless of @p size on success
710  *   (@p buf contents are truncated to @p size if not large enough), a
711  *   negative errno value otherwise and rte_errno is set.
712  */
713 static int
714 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
715                    const size_t size,
716                    const struct rte_flow_conv_rule *src,
717                    struct rte_flow_error *error)
718 {
719         size_t off;
720         int ret;
721
722         rte_memcpy(dst,
723                    (&(struct rte_flow_conv_rule){
724                         .attr = NULL,
725                         .pattern = NULL,
726                         .actions = NULL,
727                    }),
728                    size > sizeof(*dst) ? sizeof(*dst) : size);
729         off = sizeof(*dst);
730         if (src->attr_ro) {
731                 off = RTE_ALIGN_CEIL(off, sizeof(double));
732                 if (size && size >= off + sizeof(*dst->attr))
733                         dst->attr = rte_memcpy
734                                 ((void *)((uintptr_t)dst + off),
735                                  src->attr_ro, sizeof(*dst->attr));
736                 off += sizeof(*dst->attr);
737         }
738         if (src->pattern_ro) {
739                 off = RTE_ALIGN_CEIL(off, sizeof(double));
740                 ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
741                                             size > off ? size - off : 0,
742                                             src->pattern_ro, 0, error);
743                 if (ret < 0)
744                         return ret;
745                 if (size && size >= off + (size_t)ret)
746                         dst->pattern = (void *)((uintptr_t)dst + off);
747                 off += ret;
748         }
749         if (src->actions_ro) {
750                 off = RTE_ALIGN_CEIL(off, sizeof(double));
751                 ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
752                                             size > off ? size - off : 0,
753                                             src->actions_ro, 0, error);
754                 if (ret < 0)
755                         return ret;
756                 if (size >= off + (size_t)ret)
757                         dst->actions = (void *)((uintptr_t)dst + off);
758                 off += ret;
759         }
760         return off;
761 }
762
763 /**
764  * Retrieve the name of a pattern item/action type.
765  *
766  * @param is_action
767  *   Nonzero when @p src represents an action type instead of a pattern item
768  *   type.
769  * @param is_ptr
770  *   Nonzero to write string address instead of contents into @p dst.
771  * @param[out] dst
772  *   Destination buffer. Can be NULL if @p size is zero.
773  * @param size
774  *   Size of @p dst in bytes.
775  * @param[in] src
776  *   Depending on @p is_action, source pattern item or action type cast as a
777  *   pointer.
778  * @param[out] error
779  *   Perform verbose error reporting if not NULL.
780  *
781  * @return
782  *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
784  *   are truncated to @p size if not large enough), a negative errno value
785  *   otherwise and rte_errno is set.
786  */
787 static int
788 rte_flow_conv_name(int is_action,
789                    int is_ptr,
790                    char *dst,
791                    const size_t size,
792                    const void *src,
793                    struct rte_flow_error *error)
794 {
795         struct desc_info {
796                 const struct rte_flow_desc_data *data;
797                 size_t num;
798         };
799         static const struct desc_info info_rep[2] = {
800                 { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
801                 { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
802         };
803         const struct desc_info *const info = &info_rep[!!is_action];
804         unsigned int type = (uintptr_t)src;
805
806         if (type >= info->num)
807                 return rte_flow_error_set
808                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
809                          "unknown object type to retrieve the name of");
810         if (!is_ptr)
811                 return strlcpy(dst, info->data[type].name, size);
812         if (size >= sizeof(const char **))
813                 *((const char **)dst) = info->data[type].name;
814         return sizeof(const char **);
815 }
816
817 /** Helper function to convert flow API objects. */
818 int
819 rte_flow_conv(enum rte_flow_conv_op op,
820               void *dst,
821               size_t size,
822               const void *src,
823               struct rte_flow_error *error)
824 {
825         switch (op) {
826                 const struct rte_flow_attr *attr;
827
828         case RTE_FLOW_CONV_OP_NONE:
829                 return 0;
830         case RTE_FLOW_CONV_OP_ATTR:
831                 attr = src;
832                 if (size > sizeof(*attr))
833                         size = sizeof(*attr);
834                 rte_memcpy(dst, attr, size);
835                 return sizeof(*attr);
836         case RTE_FLOW_CONV_OP_ITEM:
837                 return rte_flow_conv_pattern(dst, size, src, 1, error);
838         case RTE_FLOW_CONV_OP_ACTION:
839                 return rte_flow_conv_actions(dst, size, src, 1, error);
840         case RTE_FLOW_CONV_OP_PATTERN:
841                 return rte_flow_conv_pattern(dst, size, src, 0, error);
842         case RTE_FLOW_CONV_OP_ACTIONS:
843                 return rte_flow_conv_actions(dst, size, src, 0, error);
844         case RTE_FLOW_CONV_OP_RULE:
845                 return rte_flow_conv_rule(dst, size, src, error);
846         case RTE_FLOW_CONV_OP_ITEM_NAME:
847                 return rte_flow_conv_name(0, 0, dst, size, src, error);
848         case RTE_FLOW_CONV_OP_ACTION_NAME:
849                 return rte_flow_conv_name(1, 0, dst, size, src, error);
850         case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
851                 return rte_flow_conv_name(0, 1, dst, size, src, error);
852         case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
853                 return rte_flow_conv_name(1, 1, dst, size, src, error);
854         }
855         return rte_flow_error_set
856                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
857                  "unknown object conversion operation");
858 }
859
860 /** Store a full rte_flow description. */
861 size_t
862 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
863               const struct rte_flow_attr *attr,
864               const struct rte_flow_item *items,
865               const struct rte_flow_action *actions)
866 {
867         /*
868          * Overlap struct rte_flow_conv with struct rte_flow_desc in order
869          * to convert the former to the latter without wasting space.
870          */
871         struct rte_flow_conv_rule *dst =
872                 len ?
873                 (void *)((uintptr_t)desc +
874                          (offsetof(struct rte_flow_desc, actions) -
875                           offsetof(struct rte_flow_conv_rule, actions))) :
876                 NULL;
877         size_t dst_size =
878                 len > sizeof(*desc) - sizeof(*dst) ?
879                 len - (sizeof(*desc) - sizeof(*dst)) :
880                 0;
881         struct rte_flow_conv_rule src = {
882                 .attr_ro = NULL,
883                 .pattern_ro = items,
884                 .actions_ro = actions,
885         };
886         int ret;
887
888         RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
889                          sizeof(struct rte_flow_conv_rule));
890         if (dst_size &&
891             (&dst->pattern != &desc->items ||
892              &dst->actions != &desc->actions ||
893              (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
894                 rte_errno = EINVAL;
895                 return 0;
896         }
897         ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
898         if (ret < 0)
899                 return 0;
900         ret += sizeof(*desc) - sizeof(*dst);
901         rte_memcpy(desc,
902                    (&(struct rte_flow_desc){
903                         .size = ret,
904                         .attr = *attr,
905                         .items = dst_size ? dst->pattern : NULL,
906                         .actions = dst_size ? dst->actions : NULL,
907                    }),
908                    len > sizeof(*desc) ? sizeof(*desc) : len);
909         return ret;
910 }
911
912 /**
913  * Expand RSS flows into several possible flows according to the RSS hash
914  * fields requested and the driver capabilities.
915  */
916 int
917 rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
918                     const struct rte_flow_item *pattern, uint64_t types,
919                     const struct rte_flow_expand_node graph[],
920                     int graph_root_index)
921 {
922         const int elt_n = 8;
923         const struct rte_flow_item *item;
924         const struct rte_flow_expand_node *node = &graph[graph_root_index];
925         const int *next_node;
926         const int *stack[elt_n];
927         int stack_pos = 0;
928         struct rte_flow_item flow_items[elt_n];
929         unsigned int i;
930         size_t lsize;
931         size_t user_pattern_size = 0;
932         void *addr = NULL;
933
934         lsize = offsetof(struct rte_flow_expand_rss, entry) +
935                 elt_n * sizeof(buf->entry[0]);
936         if (lsize <= size) {
937                 buf->entry[0].priority = 0;
938                 buf->entry[0].pattern = (void *)&buf->entry[elt_n];
939                 buf->entries = 0;
940                 addr = buf->entry[0].pattern;
941         }
942         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
943                 const struct rte_flow_expand_node *next = NULL;
944
945                 for (i = 0; node->next && node->next[i]; ++i) {
946                         next = &graph[node->next[i]];
947                         if (next->type == item->type)
948                                 break;
949                 }
950                 if (next)
951                         node = next;
952                 user_pattern_size += sizeof(*item);
953         }
954         user_pattern_size += sizeof(*item); /* Handle END item. */
955         lsize += user_pattern_size;
956         /* Copy the user pattern in the first entry of the buffer. */
957         if (lsize <= size) {
958                 rte_memcpy(addr, pattern, user_pattern_size);
959                 addr = (void *)(((uintptr_t)addr) + user_pattern_size);
960                 buf->entries = 1;
961         }
962         /* Start expanding. */
963         memset(flow_items, 0, sizeof(flow_items));
964         user_pattern_size -= sizeof(*item);
965         next_node = node->next;
966         stack[stack_pos] = next_node;
967         node = next_node ? &graph[*next_node] : NULL;
968         while (node) {
969                 flow_items[stack_pos].type = node->type;
970                 if (node->rss_types & types) {
971                         /*
972                          * compute the number of items to copy from the
973                          * expansion and copy it.
974                          * When the stack_pos is 0, there are 1 element in it,
975                          * plus the addition END item.
976                          */
977                         int elt = stack_pos + 2;
978
979                         flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
980                         lsize += elt * sizeof(*item) + user_pattern_size;
981                         if (lsize <= size) {
982                                 size_t n = elt * sizeof(*item);
983
984                                 buf->entry[buf->entries].priority =
985                                         stack_pos + 1;
986                                 buf->entry[buf->entries].pattern = addr;
987                                 buf->entries++;
988                                 rte_memcpy(addr, buf->entry[0].pattern,
989                                            user_pattern_size);
990                                 addr = (void *)(((uintptr_t)addr) +
991                                                 user_pattern_size);
992                                 rte_memcpy(addr, flow_items, n);
993                                 addr = (void *)(((uintptr_t)addr) + n);
994                         }
995                 }
996                 /* Go deeper. */
997                 if (node->next) {
998                         next_node = node->next;
999                         if (stack_pos++ == elt_n) {
1000                                 rte_errno = E2BIG;
1001                                 return -rte_errno;
1002                         }
1003                         stack[stack_pos] = next_node;
1004                 } else if (*(next_node + 1)) {
1005                         /* Follow up with the next possibility. */
1006                         ++next_node;
1007                 } else {
1008                         /* Move to the next path. */
1009                         if (stack_pos)
1010                                 next_node = stack[--stack_pos];
1011                         next_node++;
1012                         stack[stack_pos] = next_node;
1013                 }
1014                 node = *next_node ? &graph[*next_node] : NULL;
1015         };
1016         return lsize;
1017 }