ethdev: introduce GENEVE header TLV option item
[dpdk.git] / lib / librte_ethdev / rte_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_string_fns.h>
15 #include <rte_mbuf.h>
16 #include <rte_mbuf_dyn.h>
17 #include "rte_ethdev.h"
18 #include "rte_flow_driver.h"
19 #include "rte_flow.h"
20
/* Mbuf dynamic field offset for metadata; -1 while unregistered.
 * Set by rte_flow_dynf_metadata_register().
 */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag bit mask for metadata; 0 while unregistered. */
uint64_t rte_flow_dynf_metadata_mask;
26
/**
 * Flow elements description tables.
 *
 * One entry per item/action type: printable name plus the size of the
 * associated configuration structure (0 when the type carries no data).
 * Used by the rte_flow_conv() machinery to size copies generically.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry, indexed by RTE_FLOW_ITEM_TYPE_<t>. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
41
42 /** Information about known flow pattern items. */
43 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
44         MK_FLOW_ITEM(END, 0),
45         MK_FLOW_ITEM(VOID, 0),
46         MK_FLOW_ITEM(INVERT, 0),
47         MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
48         MK_FLOW_ITEM(PF, 0),
49         MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
50         MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
51         MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
52         MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
53         MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
54         MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
55         MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
56         MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
57         MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
58         MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
59         MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
60         MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
61         MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
62         MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
63         MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
64         MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
65         MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
66         MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
67         MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
68         MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
69         MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
70         MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
71         MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
72         MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
73         MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
74         MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
75         MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
76         MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
77         MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
78         MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
79         MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
80         MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
81                      sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
82         MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
83                      sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
84         MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
85         MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
86         MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
87         MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
88         MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
89         MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
90         MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
91         MK_FLOW_ITEM(PPPOE_PROTO_ID,
92                         sizeof(struct rte_flow_item_pppoe_proto_id)),
93         MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
94         MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
95         MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
96         MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
97         MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
98         MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
99         MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
100         MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
101 };
102
/** Generate flow_action[] entry, indexed by RTE_FLOW_ACTION_TYPE_<t>. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
109
/** Information about known flow actions, indexed by action type. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	/* NVGRE_ENCAP deliberately reuses the VXLAN encap structure; see
	 * the RTE_BUILD_BUG_ON() in rte_flow_conv_action_conf().
	 */
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	/**
	 * A shared action is represented by a handle of type
	 * (struct rte_flow_shared_action *) stored directly in the conf
	 * field (see struct rte_flow_action); no additional structure is
	 * needed to store the shared action handle.
	 */
	MK_FLOW_ACTION(SHARED, 0),
};
188
189 int
190 rte_flow_dynf_metadata_register(void)
191 {
192         int offset;
193         int flag;
194
195         static const struct rte_mbuf_dynfield desc_offs = {
196                 .name = RTE_MBUF_DYNFIELD_METADATA_NAME,
197                 .size = sizeof(uint32_t),
198                 .align = __alignof__(uint32_t),
199         };
200         static const struct rte_mbuf_dynflag desc_flag = {
201                 .name = RTE_MBUF_DYNFLAG_METADATA_NAME,
202         };
203
204         offset = rte_mbuf_dynfield_register(&desc_offs);
205         if (offset < 0)
206                 goto error;
207         flag = rte_mbuf_dynflag_register(&desc_flag);
208         if (flag < 0)
209                 goto error;
210         rte_flow_dynf_metadata_offs = offset;
211         rte_flow_dynf_metadata_mask = (1ULL << flag);
212         return 0;
213
214 error:
215         rte_flow_dynf_metadata_offs = -1;
216         rte_flow_dynf_metadata_mask = 0ULL;
217         return -rte_errno;
218 }
219
220 static inline void
221 fts_enter(struct rte_eth_dev *dev)
222 {
223         if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
224                 pthread_mutex_lock(&dev->data->flow_ops_mutex);
225 }
226
227 static inline void
228 fts_exit(struct rte_eth_dev *dev)
229 {
230         if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
231                 pthread_mutex_unlock(&dev->data->flow_ops_mutex);
232 }
233
234 static int
235 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
236 {
237         if (ret == 0)
238                 return 0;
239         if (rte_eth_dev_is_removed(port_id))
240                 return rte_flow_error_set(error, EIO,
241                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
242                                           NULL, rte_strerror(EIO));
243         return ret;
244 }
245
246 /* Get generic flow operations structure from a port. */
247 const struct rte_flow_ops *
248 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
249 {
250         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
251         const struct rte_flow_ops *ops;
252         int code;
253
254         if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
255                 code = ENODEV;
256         else if (unlikely(!dev->dev_ops->filter_ctrl ||
257                           dev->dev_ops->filter_ctrl(dev,
258                                                     RTE_ETH_FILTER_GENERIC,
259                                                     RTE_ETH_FILTER_GET,
260                                                     &ops) ||
261                           !ops))
262                 code = ENOSYS;
263         else
264                 return ops;
265         rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
266                            NULL, rte_strerror(code));
267         return NULL;
268 }
269
270 /* Check whether a flow rule can be created on a given port. */
271 int
272 rte_flow_validate(uint16_t port_id,
273                   const struct rte_flow_attr *attr,
274                   const struct rte_flow_item pattern[],
275                   const struct rte_flow_action actions[],
276                   struct rte_flow_error *error)
277 {
278         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
279         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
280         int ret;
281
282         if (unlikely(!ops))
283                 return -rte_errno;
284         if (likely(!!ops->validate)) {
285                 fts_enter(dev);
286                 ret = ops->validate(dev, attr, pattern, actions, error);
287                 fts_exit(dev);
288                 return flow_err(port_id, ret, error);
289         }
290         return rte_flow_error_set(error, ENOSYS,
291                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
292                                   NULL, rte_strerror(ENOSYS));
293 }
294
295 /* Create a flow rule on a given port. */
296 struct rte_flow *
297 rte_flow_create(uint16_t port_id,
298                 const struct rte_flow_attr *attr,
299                 const struct rte_flow_item pattern[],
300                 const struct rte_flow_action actions[],
301                 struct rte_flow_error *error)
302 {
303         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
304         struct rte_flow *flow;
305         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
306
307         if (unlikely(!ops))
308                 return NULL;
309         if (likely(!!ops->create)) {
310                 fts_enter(dev);
311                 flow = ops->create(dev, attr, pattern, actions, error);
312                 fts_exit(dev);
313                 if (flow == NULL)
314                         flow_err(port_id, -rte_errno, error);
315                 return flow;
316         }
317         rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
318                            NULL, rte_strerror(ENOSYS));
319         return NULL;
320 }
321
322 /* Destroy a flow rule on a given port. */
323 int
324 rte_flow_destroy(uint16_t port_id,
325                  struct rte_flow *flow,
326                  struct rte_flow_error *error)
327 {
328         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
329         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
330         int ret;
331
332         if (unlikely(!ops))
333                 return -rte_errno;
334         if (likely(!!ops->destroy)) {
335                 fts_enter(dev);
336                 ret = ops->destroy(dev, flow, error);
337                 fts_exit(dev);
338                 return flow_err(port_id, ret, error);
339         }
340         return rte_flow_error_set(error, ENOSYS,
341                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
342                                   NULL, rte_strerror(ENOSYS));
343 }
344
345 /* Destroy all flow rules associated with a port. */
346 int
347 rte_flow_flush(uint16_t port_id,
348                struct rte_flow_error *error)
349 {
350         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
351         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
352         int ret;
353
354         if (unlikely(!ops))
355                 return -rte_errno;
356         if (likely(!!ops->flush)) {
357                 fts_enter(dev);
358                 ret = ops->flush(dev, error);
359                 fts_exit(dev);
360                 return flow_err(port_id, ret, error);
361         }
362         return rte_flow_error_set(error, ENOSYS,
363                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
364                                   NULL, rte_strerror(ENOSYS));
365 }
366
367 /* Query an existing flow rule. */
368 int
369 rte_flow_query(uint16_t port_id,
370                struct rte_flow *flow,
371                const struct rte_flow_action *action,
372                void *data,
373                struct rte_flow_error *error)
374 {
375         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
376         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
377         int ret;
378
379         if (!ops)
380                 return -rte_errno;
381         if (likely(!!ops->query)) {
382                 fts_enter(dev);
383                 ret = ops->query(dev, flow, action, data, error);
384                 fts_exit(dev);
385                 return flow_err(port_id, ret, error);
386         }
387         return rte_flow_error_set(error, ENOSYS,
388                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
389                                   NULL, rte_strerror(ENOSYS));
390 }
391
392 /* Restrict ingress traffic to the defined flow rules. */
393 int
394 rte_flow_isolate(uint16_t port_id,
395                  int set,
396                  struct rte_flow_error *error)
397 {
398         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
399         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
400         int ret;
401
402         if (!ops)
403                 return -rte_errno;
404         if (likely(!!ops->isolate)) {
405                 fts_enter(dev);
406                 ret = ops->isolate(dev, set, error);
407                 fts_exit(dev);
408                 return flow_err(port_id, ret, error);
409         }
410         return rte_flow_error_set(error, ENOSYS,
411                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
412                                   NULL, rte_strerror(ENOSYS));
413 }
414
415 /* Initialize flow error structure. */
416 int
417 rte_flow_error_set(struct rte_flow_error *error,
418                    int code,
419                    enum rte_flow_error_type type,
420                    const void *cause,
421                    const char *message)
422 {
423         if (error) {
424                 *error = (struct rte_flow_error){
425                         .type = type,
426                         .cause = cause,
427                         .message = message,
428                 };
429         }
430         rte_errno = code;
431         return -code;
432 }
433
/** Pattern item specification types, selecting which of the three
 * per-item pointers rte_flow_conv_item_spec() copies from.
 */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC, /**< Copy item->spec. */
	RTE_FLOW_CONV_ITEM_LAST, /**< Copy item->last. */
	RTE_FLOW_CONV_ITEM_MASK, /**< Copy item->mask. */
};
440
/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	/* Select the source pointer according to the requested selector. */
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		/* Scratch unions scoped to the switch; only RAW needs them
		 * for now, each one wrapping a typed view of a pointer.
		 */
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		/* RAW carries an out-of-line variable-length pattern that
		 * must be deep-copied after the fixed part.
		 */
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		/* Copy the fixed part, deliberately leaving the pattern
		 * pointer out (re-set below if it fits).
		 */
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		/* Effective pattern length: masked spec length, or masked
		 * last length when copying a mask whose range (spec..last)
		 * grows.
		 */
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		/**
		 * allow PMD private flow item: negative types have no
		 * descriptor entry, their conf is an opaque pointer.
		 */
		off = (int)item->type >= 0 ?
		      rte_flow_desc_item[item->type].size : sizeof(void *);
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}
532
/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		/* Typed views over conf/buf for the actions that carry
		 * out-of-line data and need a deep copy.
		 */
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		/* Copy scalar fields first; key/queue pointers are re-set
		 * below once their payloads are appended.
		 */
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		/* The two encap structures are handled by the same code
		 * path; this static assert keeps them layout-compatible.
		 */
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			/* Recursively serialize the nested item list right
			 * after the fixed structure.
			 */
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		/**
		 * allow PMD private flow action: negative types have no
		 * descriptor entry, their conf is an opaque pointer.
		 */
		off = (int)action->type >= 0 ?
		      rte_flow_desc_action[action->type].size : sizeof(void *);
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}
639
/**
 * Copy a list of pattern items.
 *
 * Two passes over @p src: the first writes the item array itself (types
 * only, pointers cleared) and determines the item count; the second
 * appends each spec/last/mask payload after the array and patches the
 * destination pointers when they fit within @p size.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p buf contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* First pass: copy item types, reject unknown non-negative types. */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * allow PMD private flow item (negative type values skip
		 * the descriptor-table validation)
		 */
		if (((int)src->type >= 0) &&
			((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		/* END (type 0) terminates the list on the next iteration. */
		if (!src->type)
			num = i + 1;
	}
	/* Rewind both cursors for the payload pass. */
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			/* Payloads are aligned for the worst-case member. */
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;

		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}
732
733 /**
734  * Copy a list of actions.
735  *
736  * @param[out] dst
737  *   Destination buffer. Can be NULL if @p size is zero.
738  * @param size
739  *   Size of @p dst in bytes.
740  * @param[in] src
741  *   Source actions.
742  * @param num
743  *   Maximum number of actions to process from @p src or 0 to process the
744  *   entire list. In both cases, processing stops after
745  *   RTE_FLOW_ACTION_TYPE_END is encountered.
746  * @param[out] error
747  *   Perform verbose error reporting if not NULL.
748  *
749  * @return
750  *   A positive value representing the number of bytes needed to store
751  *   actions regardless of @p size on success (@p buf contents are truncated
752  *   to @p size if not large enough), a negative errno value otherwise and
753  *   rte_errno is set.
754  */
755 static int
756 rte_flow_conv_actions(struct rte_flow_action *dst,
757                       const size_t size,
758                       const struct rte_flow_action *src,
759                       unsigned int num,
760                       struct rte_flow_error *error)
761 {
762         uintptr_t data = (uintptr_t)dst;
763         size_t off;
764         size_t ret;
765         unsigned int i;
766
767         for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
768                 /**
769                  * allow PMD private flow action
770                  */
771                 if (((int)src->type >= 0) &&
772                     ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
773                     !rte_flow_desc_action[src->type].name))
774                         return rte_flow_error_set
775                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
776                                  src, "cannot convert unknown action type");
777                 if (size >= off + sizeof(*dst))
778                         *dst = (struct rte_flow_action){
779                                 .type = src->type,
780                         };
781                 off += sizeof(*dst);
782                 if (!src->type)
783                         num = i + 1;
784         }
785         num = i;
786         src -= num;
787         dst -= num;
788         do {
789                 if (src->conf) {
790                         off = RTE_ALIGN_CEIL(off, sizeof(double));
791                         ret = rte_flow_conv_action_conf
792                                 ((void *)(data + off),
793                                  size > off ? size - off : 0, src);
794                         if (size && size >= off + ret)
795                                 dst->conf = (void *)(data + off);
796                         off += ret;
797                 }
798                 ++src;
799                 ++dst;
800         } while (--num);
801         return off;
802 }
803
804 /**
805  * Copy flow rule components.
806  *
807  * This comprises the flow rule descriptor itself, attributes, pattern and
808  * actions list. NULL components in @p src are skipped.
809  *
810  * @param[out] dst
811  *   Destination buffer. Can be NULL if @p size is zero.
812  * @param size
813  *   Size of @p dst in bytes.
814  * @param[in] src
815  *   Source flow rule descriptor.
816  * @param[out] error
817  *   Perform verbose error reporting if not NULL.
818  *
819  * @return
820  *   A positive value representing the number of bytes needed to store all
821  *   components including the descriptor regardless of @p size on success
822  *   (@p buf contents are truncated to @p size if not large enough), a
823  *   negative errno value otherwise and rte_errno is set.
824  */
825 static int
826 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
827                    const size_t size,
828                    const struct rte_flow_conv_rule *src,
829                    struct rte_flow_error *error)
830 {
831         size_t off;
832         int ret;
833
834         rte_memcpy(dst,
835                    (&(struct rte_flow_conv_rule){
836                         .attr = NULL,
837                         .pattern = NULL,
838                         .actions = NULL,
839                    }),
840                    size > sizeof(*dst) ? sizeof(*dst) : size);
841         off = sizeof(*dst);
842         if (src->attr_ro) {
843                 off = RTE_ALIGN_CEIL(off, sizeof(double));
844                 if (size && size >= off + sizeof(*dst->attr))
845                         dst->attr = rte_memcpy
846                                 ((void *)((uintptr_t)dst + off),
847                                  src->attr_ro, sizeof(*dst->attr));
848                 off += sizeof(*dst->attr);
849         }
850         if (src->pattern_ro) {
851                 off = RTE_ALIGN_CEIL(off, sizeof(double));
852                 ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
853                                             size > off ? size - off : 0,
854                                             src->pattern_ro, 0, error);
855                 if (ret < 0)
856                         return ret;
857                 if (size && size >= off + (size_t)ret)
858                         dst->pattern = (void *)((uintptr_t)dst + off);
859                 off += ret;
860         }
861         if (src->actions_ro) {
862                 off = RTE_ALIGN_CEIL(off, sizeof(double));
863                 ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
864                                             size > off ? size - off : 0,
865                                             src->actions_ro, 0, error);
866                 if (ret < 0)
867                         return ret;
868                 if (size >= off + (size_t)ret)
869                         dst->actions = (void *)((uintptr_t)dst + off);
870                 off += ret;
871         }
872         return off;
873 }
874
875 /**
876  * Retrieve the name of a pattern item/action type.
877  *
878  * @param is_action
879  *   Nonzero when @p src represents an action type instead of a pattern item
880  *   type.
881  * @param is_ptr
882  *   Nonzero to write string address instead of contents into @p dst.
883  * @param[out] dst
884  *   Destination buffer. Can be NULL if @p size is zero.
885  * @param size
886  *   Size of @p dst in bytes.
887  * @param[in] src
888  *   Depending on @p is_action, source pattern item or action type cast as a
889  *   pointer.
890  * @param[out] error
891  *   Perform verbose error reporting if not NULL.
892  *
893  * @return
894  *   A positive value representing the number of bytes needed to store the
895  *   name or its address regardless of @p size on success (@p buf contents
896  *   are truncated to @p size if not large enough), a negative errno value
897  *   otherwise and rte_errno is set.
898  */
899 static int
900 rte_flow_conv_name(int is_action,
901                    int is_ptr,
902                    char *dst,
903                    const size_t size,
904                    const void *src,
905                    struct rte_flow_error *error)
906 {
907         struct desc_info {
908                 const struct rte_flow_desc_data *data;
909                 size_t num;
910         };
911         static const struct desc_info info_rep[2] = {
912                 { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
913                 { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
914         };
915         const struct desc_info *const info = &info_rep[!!is_action];
916         unsigned int type = (uintptr_t)src;
917
918         if (type >= info->num)
919                 return rte_flow_error_set
920                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
921                          "unknown object type to retrieve the name of");
922         if (!is_ptr)
923                 return strlcpy(dst, info->data[type].name, size);
924         if (size >= sizeof(const char **))
925                 *((const char **)dst) = info->data[type].name;
926         return sizeof(const char **);
927 }
928
929 /** Helper function to convert flow API objects. */
930 int
931 rte_flow_conv(enum rte_flow_conv_op op,
932               void *dst,
933               size_t size,
934               const void *src,
935               struct rte_flow_error *error)
936 {
937         switch (op) {
938                 const struct rte_flow_attr *attr;
939
940         case RTE_FLOW_CONV_OP_NONE:
941                 return 0;
942         case RTE_FLOW_CONV_OP_ATTR:
943                 attr = src;
944                 if (size > sizeof(*attr))
945                         size = sizeof(*attr);
946                 rte_memcpy(dst, attr, size);
947                 return sizeof(*attr);
948         case RTE_FLOW_CONV_OP_ITEM:
949                 return rte_flow_conv_pattern(dst, size, src, 1, error);
950         case RTE_FLOW_CONV_OP_ACTION:
951                 return rte_flow_conv_actions(dst, size, src, 1, error);
952         case RTE_FLOW_CONV_OP_PATTERN:
953                 return rte_flow_conv_pattern(dst, size, src, 0, error);
954         case RTE_FLOW_CONV_OP_ACTIONS:
955                 return rte_flow_conv_actions(dst, size, src, 0, error);
956         case RTE_FLOW_CONV_OP_RULE:
957                 return rte_flow_conv_rule(dst, size, src, error);
958         case RTE_FLOW_CONV_OP_ITEM_NAME:
959                 return rte_flow_conv_name(0, 0, dst, size, src, error);
960         case RTE_FLOW_CONV_OP_ACTION_NAME:
961                 return rte_flow_conv_name(1, 0, dst, size, src, error);
962         case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
963                 return rte_flow_conv_name(0, 1, dst, size, src, error);
964         case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
965                 return rte_flow_conv_name(1, 1, dst, size, src, error);
966         }
967         return rte_flow_error_set
968                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
969                  "unknown object conversion operation");
970 }
971
/**
 * Store a full rte_flow description.
 *
 * Converts @p attr, @p items and @p actions into a single self-contained
 * rte_flow_desc object of at most @p len bytes, using rte_flow_conv()
 * with RTE_FLOW_CONV_OP_RULE on a rte_flow_conv_rule view that overlaps
 * the tail of @p desc. Returns the number of bytes required regardless
 * of @p len (output is truncated when @p len is too small), 0 on error
 * with rte_errno set.
 */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        /*
         * Overlap struct rte_flow_conv with struct rte_flow_desc in order
         * to convert the former to the latter without wasting space.
         */
        struct rte_flow_conv_rule *dst =
                len ?
                (void *)((uintptr_t)desc +
                         (offsetof(struct rte_flow_desc, actions) -
                          offsetof(struct rte_flow_conv_rule, actions))) :
                NULL;
        /* Bytes left for the overlapped conversion object. */
        size_t dst_size =
                len > sizeof(*desc) - sizeof(*dst) ?
                len - (sizeof(*desc) - sizeof(*dst)) :
                0;
        /* attr is copied separately below, hence attr_ro is NULL here. */
        struct rte_flow_conv_rule src = {
                .attr_ro = NULL,
                .pattern_ro = items,
                .actions_ro = actions,
        };
        int ret;

        RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
                         sizeof(struct rte_flow_conv_rule));
        /*
         * Sanity-check the overlap at run time: the pattern/actions fields
         * of both structures and their ends must coincide exactly.
         */
        if (dst_size &&
            (&dst->pattern != &desc->items ||
             &dst->actions != &desc->actions ||
             (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
                rte_errno = EINVAL;
                return 0;
        }
        ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
        if (ret < 0)
                return 0;
        /* Account for the descriptor bytes preceding the overlap. */
        ret += sizeof(*desc) - sizeof(*dst);
        rte_memcpy(desc,
                   (&(struct rte_flow_desc){
                        .size = ret,
                        .attr = *attr,
                        .items = dst_size ? dst->pattern : NULL,
                        .actions = dst_size ? dst->actions : NULL,
                   }),
                   len > sizeof(*desc) ? sizeof(*desc) : len);
        return ret;
}
1023
1024 int
1025 rte_flow_dev_dump(uint16_t port_id, FILE *file, struct rte_flow_error *error)
1026 {
1027         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1028         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1029         int ret;
1030
1031         if (unlikely(!ops))
1032                 return -rte_errno;
1033         if (likely(!!ops->dev_dump)) {
1034                 fts_enter(dev);
1035                 ret = ops->dev_dump(dev, file, error);
1036                 fts_exit(dev);
1037                 return flow_err(port_id, ret, error);
1038         }
1039         return rte_flow_error_set(error, ENOSYS,
1040                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1041                                   NULL, rte_strerror(ENOSYS));
1042 }
1043
1044 int
1045 rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
1046                     uint32_t nb_contexts, struct rte_flow_error *error)
1047 {
1048         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1049         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1050         int ret;
1051
1052         if (unlikely(!ops))
1053                 return -rte_errno;
1054         if (likely(!!ops->get_aged_flows)) {
1055                 fts_enter(dev);
1056                 ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
1057                 fts_exit(dev);
1058                 return flow_err(port_id, ret, error);
1059         }
1060         return rte_flow_error_set(error, ENOTSUP,
1061                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1062                                   NULL, rte_strerror(ENOTSUP));
1063 }
1064
1065 struct rte_flow_shared_action *
1066 rte_flow_shared_action_create(uint16_t port_id,
1067                               const struct rte_flow_shared_action_conf *conf,
1068                               const struct rte_flow_action *action,
1069                               struct rte_flow_error *error)
1070 {
1071         struct rte_flow_shared_action *shared_action;
1072         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1073
1074         if (unlikely(!ops))
1075                 return NULL;
1076         if (unlikely(!ops->shared_action_create)) {
1077                 rte_flow_error_set(error, ENOSYS,
1078                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1079                                    rte_strerror(ENOSYS));
1080                 return NULL;
1081         }
1082         shared_action = ops->shared_action_create(&rte_eth_devices[port_id],
1083                                                   conf, action, error);
1084         if (shared_action == NULL)
1085                 flow_err(port_id, -rte_errno, error);
1086         return shared_action;
1087 }
1088
1089 int
1090 rte_flow_shared_action_destroy(uint16_t port_id,
1091                               struct rte_flow_shared_action *action,
1092                               struct rte_flow_error *error)
1093 {
1094         int ret;
1095         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1096
1097         if (unlikely(!ops))
1098                 return -rte_errno;
1099         if (unlikely(!ops->shared_action_destroy))
1100                 return rte_flow_error_set(error, ENOSYS,
1101                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1102                                           NULL, rte_strerror(ENOSYS));
1103         ret = ops->shared_action_destroy(&rte_eth_devices[port_id], action,
1104                                          error);
1105         return flow_err(port_id, ret, error);
1106 }
1107
1108 int
1109 rte_flow_shared_action_update(uint16_t port_id,
1110                               struct rte_flow_shared_action *action,
1111                               const struct rte_flow_action *update,
1112                               struct rte_flow_error *error)
1113 {
1114         int ret;
1115         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1116
1117         if (unlikely(!ops))
1118                 return -rte_errno;
1119         if (unlikely(!ops->shared_action_update))
1120                 return rte_flow_error_set(error, ENOSYS,
1121                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1122                                           NULL, rte_strerror(ENOSYS));
1123         ret = ops->shared_action_update(&rte_eth_devices[port_id], action,
1124                                         update, error);
1125         return flow_err(port_id, ret, error);
1126 }
1127
1128 int
1129 rte_flow_shared_action_query(uint16_t port_id,
1130                              const struct rte_flow_shared_action *action,
1131                              void *data,
1132                              struct rte_flow_error *error)
1133 {
1134         int ret;
1135         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1136
1137         if (unlikely(!ops))
1138                 return -rte_errno;
1139         if (unlikely(!ops->shared_action_query))
1140                 return rte_flow_error_set(error, ENOSYS,
1141                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1142                                           NULL, rte_strerror(ENOSYS));
1143         ret = ops->shared_action_query(&rte_eth_devices[port_id], action,
1144                                        data, error);
1145         return flow_err(port_id, ret, error);
1146 }
1147
1148 int
1149 rte_flow_tunnel_decap_set(uint16_t port_id,
1150                           struct rte_flow_tunnel *tunnel,
1151                           struct rte_flow_action **actions,
1152                           uint32_t *num_of_actions,
1153                           struct rte_flow_error *error)
1154 {
1155         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1156         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1157
1158         if (unlikely(!ops))
1159                 return -rte_errno;
1160         if (likely(!!ops->tunnel_decap_set)) {
1161                 return flow_err(port_id,
1162                                 ops->tunnel_decap_set(dev, tunnel, actions,
1163                                                       num_of_actions, error),
1164                                 error);
1165         }
1166         return rte_flow_error_set(error, ENOTSUP,
1167                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1168                                   NULL, rte_strerror(ENOTSUP));
1169 }
1170
1171 int
1172 rte_flow_tunnel_match(uint16_t port_id,
1173                       struct rte_flow_tunnel *tunnel,
1174                       struct rte_flow_item **items,
1175                       uint32_t *num_of_items,
1176                       struct rte_flow_error *error)
1177 {
1178         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1179         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1180
1181         if (unlikely(!ops))
1182                 return -rte_errno;
1183         if (likely(!!ops->tunnel_match)) {
1184                 return flow_err(port_id,
1185                                 ops->tunnel_match(dev, tunnel, items,
1186                                                   num_of_items, error),
1187                                 error);
1188         }
1189         return rte_flow_error_set(error, ENOTSUP,
1190                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1191                                   NULL, rte_strerror(ENOTSUP));
1192 }
1193
1194 int
1195 rte_flow_get_restore_info(uint16_t port_id,
1196                           struct rte_mbuf *m,
1197                           struct rte_flow_restore_info *restore_info,
1198                           struct rte_flow_error *error)
1199 {
1200         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1201         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1202
1203         if (unlikely(!ops))
1204                 return -rte_errno;
1205         if (likely(!!ops->get_restore_info)) {
1206                 return flow_err(port_id,
1207                                 ops->get_restore_info(dev, m, restore_info,
1208                                                       error),
1209                                 error);
1210         }
1211         return rte_flow_error_set(error, ENOTSUP,
1212                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1213                                   NULL, rte_strerror(ENOTSUP));
1214 }
1215
1216 int
1217 rte_flow_tunnel_action_decap_release(uint16_t port_id,
1218                                      struct rte_flow_action *actions,
1219                                      uint32_t num_of_actions,
1220                                      struct rte_flow_error *error)
1221 {
1222         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1223         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1224
1225         if (unlikely(!ops))
1226                 return -rte_errno;
1227         if (likely(!!ops->tunnel_action_decap_release)) {
1228                 return flow_err(port_id,
1229                                 ops->tunnel_action_decap_release(dev, actions,
1230                                                                  num_of_actions,
1231                                                                  error),
1232                                 error);
1233         }
1234         return rte_flow_error_set(error, ENOTSUP,
1235                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1236                                   NULL, rte_strerror(ENOTSUP));
1237 }
1238
1239 int
1240 rte_flow_tunnel_item_release(uint16_t port_id,
1241                              struct rte_flow_item *items,
1242                              uint32_t num_of_items,
1243                              struct rte_flow_error *error)
1244 {
1245         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1246         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1247
1248         if (unlikely(!ops))
1249                 return -rte_errno;
1250         if (likely(!!ops->tunnel_item_release)) {
1251                 return flow_err(port_id,
1252                                 ops->tunnel_item_release(dev, items,
1253                                                          num_of_items, error),
1254                                 error);
1255         }
1256         return rte_flow_error_set(error, ENOTSUP,
1257                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1258                                   NULL, rte_strerror(ENOTSUP));
1259 }