ethdev: add represented port item to flow API
lib/ethdev/rte_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_string_fns.h>
15 #include <rte_mbuf.h>
16 #include <rte_mbuf_dyn.h>
17 #include "rte_ethdev.h"
18 #include "rte_flow_driver.h"
19 #include "rte_flow.h"
20
21 /* Mbuf dynamic field name for metadata. */
22 int32_t rte_flow_dynf_metadata_offs = -1;
23
24 /* Mbuf dynamic field flag bit number for metadata. */
25 uint64_t rte_flow_dynf_metadata_mask;
26
27 /**
28  * Flow element description, used by the item and action tables below.
29  */
30 struct rte_flow_desc_data {
31         const char *name;
32         size_t size;
33 };
34
35 /** Generate flow_item[] entry. */
36 #define MK_FLOW_ITEM(t, s) \
37         [RTE_FLOW_ITEM_TYPE_ ## t] = { \
38                 .name = # t, \
39                 .size = s, \
40         }
41
42 /** Information about known flow pattern items. */
43 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
44         MK_FLOW_ITEM(END, 0),
45         MK_FLOW_ITEM(VOID, 0),
46         MK_FLOW_ITEM(INVERT, 0),
47         MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
48         MK_FLOW_ITEM(PF, 0),
49         MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
50         MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
51         MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
52         MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
53         MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
54         MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
55         MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
56         MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
57         MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
58         MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
59         MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
60         MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
61         MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
62         MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
63         MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
64         MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
65         MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
66         MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
67         MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
68         MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
69         MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
70         MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
71         MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
72         MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
73         MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
74         MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
75         MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
76         MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
77         MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
78         MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
79         MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
80         MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
81                      sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
82         MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
83                      sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
84         MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
85         MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
86         MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
87         MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
88         MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
89         MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
90         MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
91         MK_FLOW_ITEM(PPPOE_PROTO_ID,
92                         sizeof(struct rte_flow_item_pppoe_proto_id)),
93         MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
94         MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
95         MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
96         MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
97         MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
98         MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
99         MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
100         MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
101         MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
102         MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
103         MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
104         MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
105 };
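
/*
 * Illustrative sketch (not part of the original file): using the newly
 * added REPRESENTED_PORT item in a transfer rule to drop all traffic
 * entering the switch through the port represented by ethdev
 * "repr_port_id".  Rule creation goes through "proxy_port_id"; the port
 * numbers, the choice of DROP and the helper name are hypothetical.
 */
static __rte_unused struct rte_flow *
example_drop_from_represented_port(uint16_t proxy_port_id,
                                   uint16_t repr_port_id,
                                   struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .transfer = 1 };
        const struct rte_flow_item_ethdev port_spec = {
                .port_id = repr_port_id,
        };
        const struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
                        .spec = &port_spec,
                        /* NULL mask means the default mask for the item. */
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        if (rte_flow_validate(proxy_port_id, &attr, pattern, actions, error))
                return NULL;
        return rte_flow_create(proxy_port_id, &attr, pattern, actions, error);
}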
106
107 /** Generate flow_action[] entry. */
108 #define MK_FLOW_ACTION(t, s) \
109         [RTE_FLOW_ACTION_TYPE_ ## t] = { \
110                 .name = # t, \
111                 .size = s, \
112         }
113
114 /** Information about known flow actions. */
115 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
116         MK_FLOW_ACTION(END, 0),
117         MK_FLOW_ACTION(VOID, 0),
118         MK_FLOW_ACTION(PASSTHRU, 0),
119         MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
120         MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
121         MK_FLOW_ACTION(FLAG, 0),
122         MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
123         MK_FLOW_ACTION(DROP, 0),
124         MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
125         MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
126         MK_FLOW_ACTION(PF, 0),
127         MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
128         MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
129         MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
130         MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
131         MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
132         MK_FLOW_ACTION(OF_SET_MPLS_TTL,
133                        sizeof(struct rte_flow_action_of_set_mpls_ttl)),
134         MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
135         MK_FLOW_ACTION(OF_SET_NW_TTL,
136                        sizeof(struct rte_flow_action_of_set_nw_ttl)),
137         MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
138         MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
139         MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
140         MK_FLOW_ACTION(OF_POP_VLAN, 0),
141         MK_FLOW_ACTION(OF_PUSH_VLAN,
142                        sizeof(struct rte_flow_action_of_push_vlan)),
143         MK_FLOW_ACTION(OF_SET_VLAN_VID,
144                        sizeof(struct rte_flow_action_of_set_vlan_vid)),
145         MK_FLOW_ACTION(OF_SET_VLAN_PCP,
146                        sizeof(struct rte_flow_action_of_set_vlan_pcp)),
147         MK_FLOW_ACTION(OF_POP_MPLS,
148                        sizeof(struct rte_flow_action_of_pop_mpls)),
149         MK_FLOW_ACTION(OF_PUSH_MPLS,
150                        sizeof(struct rte_flow_action_of_push_mpls)),
151         MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
152         MK_FLOW_ACTION(VXLAN_DECAP, 0),
153         MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
154         MK_FLOW_ACTION(NVGRE_DECAP, 0),
155         MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
156         MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
157         MK_FLOW_ACTION(SET_IPV4_SRC,
158                        sizeof(struct rte_flow_action_set_ipv4)),
159         MK_FLOW_ACTION(SET_IPV4_DST,
160                        sizeof(struct rte_flow_action_set_ipv4)),
161         MK_FLOW_ACTION(SET_IPV6_SRC,
162                        sizeof(struct rte_flow_action_set_ipv6)),
163         MK_FLOW_ACTION(SET_IPV6_DST,
164                        sizeof(struct rte_flow_action_set_ipv6)),
165         MK_FLOW_ACTION(SET_TP_SRC,
166                        sizeof(struct rte_flow_action_set_tp)),
167         MK_FLOW_ACTION(SET_TP_DST,
168                        sizeof(struct rte_flow_action_set_tp)),
169         MK_FLOW_ACTION(MAC_SWAP, 0),
170         MK_FLOW_ACTION(DEC_TTL, 0),
171         MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
172         MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
173         MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
174         MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
175         MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
176         MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
177         MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
178         MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
179         MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
180         MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
181         MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
182         MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
183         MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
184         MK_FLOW_ACTION(MODIFY_FIELD,
185                        sizeof(struct rte_flow_action_modify_field)),
186         /**
187          * Indirect action represented as handle of type
188          * (struct rte_flow_action_handle *) stored in conf field (see
189          * struct rte_flow_action); no need for additional structure to
190          * store indirect action handle.
191          */
192         MK_FLOW_ACTION(INDIRECT, 0),
193         MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
194 };
195
196 int
197 rte_flow_dynf_metadata_register(void)
198 {
199         int offset;
200         int flag;
201
202         static const struct rte_mbuf_dynfield desc_offs = {
203                 .name = RTE_MBUF_DYNFIELD_METADATA_NAME,
204                 .size = sizeof(uint32_t),
205                 .align = __alignof__(uint32_t),
206         };
207         static const struct rte_mbuf_dynflag desc_flag = {
208                 .name = RTE_MBUF_DYNFLAG_METADATA_NAME,
209         };
210
211         offset = rte_mbuf_dynfield_register(&desc_offs);
212         if (offset < 0)
213                 goto error;
214         flag = rte_mbuf_dynflag_register(&desc_flag);
215         if (flag < 0)
216                 goto error;
217         rte_flow_dynf_metadata_offs = offset;
218         rte_flow_dynf_metadata_mask = (1ULL << flag);
219         return 0;
220
221 error:
222         rte_flow_dynf_metadata_offs = -1;
223         rte_flow_dynf_metadata_mask = 0ULL;
224         return -rte_errno;
225 }
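
/*
 * Illustrative sketch (not part of the original file): applications that
 * match or set metadata (META item, SET_META action) are expected to call
 * the registration helper above once, before creating such rules.  The
 * wrapper name below is hypothetical.
 */
static __rte_unused int
example_enable_flow_metadata(void)
{
        /* Registers the dynamic mbuf field and flag used for metadata. */
        if (rte_flow_dynf_metadata_register() < 0)
                return -rte_errno;
        /* RTE_FLOW_DYNF_METADATA(mbuf) is usable once this succeeds. */
        return 0;
}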
226
227 static inline void
228 fts_enter(struct rte_eth_dev *dev)
229 {
230         if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
231                 pthread_mutex_lock(&dev->data->flow_ops_mutex);
232 }
233
234 static inline void
235 fts_exit(struct rte_eth_dev *dev)
236 {
237         if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
238                 pthread_mutex_unlock(&dev->data->flow_ops_mutex);
239 }
240
241 static int
242 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
243 {
244         if (ret == 0)
245                 return 0;
246         if (rte_eth_dev_is_removed(port_id))
247                 return rte_flow_error_set(error, EIO,
248                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
249                                           NULL, rte_strerror(EIO));
250         return ret;
251 }
252
253 /* Get generic flow operations structure from a port. */
254 const struct rte_flow_ops *
255 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
256 {
257         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
258         const struct rte_flow_ops *ops;
259         int code;
260
261         if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
262                 code = ENODEV;
263         else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
264                 /* flow API not supported with this driver dev_ops */
265                 code = ENOSYS;
266         else
267                 code = dev->dev_ops->flow_ops_get(dev, &ops);
268         if (code == 0 && ops == NULL)
269                 /* flow API not supported with this device */
270                 code = ENOSYS;
271
272         if (code != 0) {
273                 rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274                                    NULL, rte_strerror(code));
275                 return NULL;
276         }
277         return ops;
278 }
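
/*
 * Illustrative sketch (not part of the original file): the driver side of
 * the lookup above.  A PMD returns its rte_flow_ops table from the
 * flow_ops_get dev_ops callback; the names below are hypothetical and the
 * NULL callbacks would point at real PMD implementations.
 */
static __rte_unused int
example_pmd_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
                         const struct rte_flow_ops **ops)
{
        static const struct rte_flow_ops example_ops = {
                .validate = NULL,
                .create = NULL,
                .destroy = NULL,
                .flush = NULL,
        };

        *ops = &example_ops;
        return 0;
}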
279
280 /* Check whether a flow rule can be created on a given port. */
281 int
282 rte_flow_validate(uint16_t port_id,
283                   const struct rte_flow_attr *attr,
284                   const struct rte_flow_item pattern[],
285                   const struct rte_flow_action actions[],
286                   struct rte_flow_error *error)
287 {
288         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
289         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
290         int ret;
291
292         if (unlikely(!ops))
293                 return -rte_errno;
294         if (likely(!!ops->validate)) {
295                 fts_enter(dev);
296                 ret = ops->validate(dev, attr, pattern, actions, error);
297                 fts_exit(dev);
298                 return flow_err(port_id, ret, error);
299         }
300         return rte_flow_error_set(error, ENOSYS,
301                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
302                                   NULL, rte_strerror(ENOSYS));
303 }
304
305 /* Create a flow rule on a given port. */
306 struct rte_flow *
307 rte_flow_create(uint16_t port_id,
308                 const struct rte_flow_attr *attr,
309                 const struct rte_flow_item pattern[],
310                 const struct rte_flow_action actions[],
311                 struct rte_flow_error *error)
312 {
313         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
314         struct rte_flow *flow;
315         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
316
317         if (unlikely(!ops))
318                 return NULL;
319         if (likely(!!ops->create)) {
320                 fts_enter(dev);
321                 flow = ops->create(dev, attr, pattern, actions, error);
322                 fts_exit(dev);
323                 if (flow == NULL)
324                         flow_err(port_id, -rte_errno, error);
325                 return flow;
326         }
327         rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
328                            NULL, rte_strerror(ENOSYS));
329         return NULL;
330 }
331
332 /* Destroy a flow rule on a given port. */
333 int
334 rte_flow_destroy(uint16_t port_id,
335                  struct rte_flow *flow,
336                  struct rte_flow_error *error)
337 {
338         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
339         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
340         int ret;
341
342         if (unlikely(!ops))
343                 return -rte_errno;
344         if (likely(!!ops->destroy)) {
345                 fts_enter(dev);
346                 ret = ops->destroy(dev, flow, error);
347                 fts_exit(dev);
348                 return flow_err(port_id, ret, error);
349         }
350         return rte_flow_error_set(error, ENOSYS,
351                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
352                                   NULL, rte_strerror(ENOSYS));
353 }
354
355 /* Destroy all flow rules associated with a port. */
356 int
357 rte_flow_flush(uint16_t port_id,
358                struct rte_flow_error *error)
359 {
360         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
361         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
362         int ret;
363
364         if (unlikely(!ops))
365                 return -rte_errno;
366         if (likely(!!ops->flush)) {
367                 fts_enter(dev);
368                 ret = ops->flush(dev, error);
369                 fts_exit(dev);
370                 return flow_err(port_id, ret, error);
371         }
372         return rte_flow_error_set(error, ENOSYS,
373                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
374                                   NULL, rte_strerror(ENOSYS));
375 }
376
377 /* Query an existing flow rule. */
378 int
379 rte_flow_query(uint16_t port_id,
380                struct rte_flow *flow,
381                const struct rte_flow_action *action,
382                void *data,
383                struct rte_flow_error *error)
384 {
385         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
386         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
387         int ret;
388
389         if (!ops)
390                 return -rte_errno;
391         if (likely(!!ops->query)) {
392                 fts_enter(dev);
393                 ret = ops->query(dev, flow, action, data, error);
394                 fts_exit(dev);
395                 return flow_err(port_id, ret, error);
396         }
397         return rte_flow_error_set(error, ENOSYS,
398                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
399                                   NULL, rte_strerror(ENOSYS));
400 }
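
/*
 * Illustrative sketch (not part of the original file): querying the COUNT
 * action of an existing rule.  The "action" argument selects which action
 * of the rule is queried; the helper name is hypothetical.
 */
static __rte_unused int
example_read_counter(uint16_t port_id, struct rte_flow *flow,
                     struct rte_flow_query_count *stats,
                     struct rte_flow_error *error)
{
        const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };

        /* On success *stats holds the hits/bytes reported by the PMD. */
        return rte_flow_query(port_id, flow, &count_action, stats, error);
}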
401
402 /* Restrict ingress traffic to the defined flow rules. */
403 int
404 rte_flow_isolate(uint16_t port_id,
405                  int set,
406                  struct rte_flow_error *error)
407 {
408         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
409         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
410         int ret;
411
412         if (!ops)
413                 return -rte_errno;
414         if (likely(!!ops->isolate)) {
415                 fts_enter(dev);
416                 ret = ops->isolate(dev, set, error);
417                 fts_exit(dev);
418                 return flow_err(port_id, ret, error);
419         }
420         return rte_flow_error_set(error, ENOSYS,
421                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
422                                   NULL, rte_strerror(ENOSYS));
423 }
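
/*
 * Illustrative sketch (not part of the original file): entering isolated
 * mode so that only traffic matching explicit flow rules is received.
 * The flow API recommends doing this as early as possible, ideally before
 * the first rte_eth_dev_configure() call; the helper name is hypothetical.
 */
static __rte_unused int
example_enter_isolated_mode(uint16_t port_id, struct rte_flow_error *error)
{
        return rte_flow_isolate(port_id, 1, error);
}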
424
425 /* Initialize flow error structure. */
426 int
427 rte_flow_error_set(struct rte_flow_error *error,
428                    int code,
429                    enum rte_flow_error_type type,
430                    const void *cause,
431                    const char *message)
432 {
433         if (error) {
434                 *error = (struct rte_flow_error){
435                         .type = type,
436                         .cause = cause,
437                         .message = message,
438                 };
439         }
440         rte_errno = code;
441         return -code;
442 }
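
/*
 * Illustrative sketch (not part of the original file): how a PMD callback
 * typically uses rte_flow_error_set() to reject an unsupported pattern
 * item while pointing the application at the offending object.  The
 * helper name is hypothetical.
 */
static __rte_unused int
example_reject_item(const struct rte_flow_item *item,
                    struct rte_flow_error *error)
{
        /* Sets rte_errno to ENOTSUP and returns -ENOTSUP. */
        return rte_flow_error_set(error, ENOTSUP,
                                  RTE_FLOW_ERROR_TYPE_ITEM, item,
                                  "item type not supported");
}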
443
444 /** Pattern item specification types. */
445 enum rte_flow_conv_item_spec_type {
446         RTE_FLOW_CONV_ITEM_SPEC,
447         RTE_FLOW_CONV_ITEM_LAST,
448         RTE_FLOW_CONV_ITEM_MASK,
449 };
450
451 /**
452  * Copy pattern item specification.
453  *
454  * @param[out] buf
455  *   Output buffer. Can be NULL if @p size is zero.
456  * @param size
457  *   Size of @p buf in bytes.
458  * @param[in] item
459  *   Pattern item to copy specification from.
460  * @param type
461  *   Specification selector for either @p spec, @p last or @p mask.
462  *
463  * @return
464  *   Number of bytes needed to store pattern item specification regardless
465  *   of @p size. @p buf contents are truncated to @p size if not large
466  *   enough.
467  */
468 static size_t
469 rte_flow_conv_item_spec(void *buf, const size_t size,
470                         const struct rte_flow_item *item,
471                         enum rte_flow_conv_item_spec_type type)
472 {
473         size_t off;
474         const void *data =
475                 type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
476                 type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
477                 type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
478                 NULL;
479
480         switch (item->type) {
481                 union {
482                         const struct rte_flow_item_raw *raw;
483                 } spec;
484                 union {
485                         const struct rte_flow_item_raw *raw;
486                 } last;
487                 union {
488                         const struct rte_flow_item_raw *raw;
489                 } mask;
490                 union {
491                         const struct rte_flow_item_raw *raw;
492                 } src;
493                 union {
494                         struct rte_flow_item_raw *raw;
495                 } dst;
496                 size_t tmp;
497
498         case RTE_FLOW_ITEM_TYPE_RAW:
499                 spec.raw = item->spec;
500                 last.raw = item->last ? item->last : item->spec;
501                 mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
502                 src.raw = data;
503                 dst.raw = buf;
504                 rte_memcpy(dst.raw,
505                            (&(struct rte_flow_item_raw){
506                                 .relative = src.raw->relative,
507                                 .search = src.raw->search,
508                                 .reserved = src.raw->reserved,
509                                 .offset = src.raw->offset,
510                                 .limit = src.raw->limit,
511                                 .length = src.raw->length,
512                            }),
513                            size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
514                 off = sizeof(*dst.raw);
515                 if (type == RTE_FLOW_CONV_ITEM_SPEC ||
516                     (type == RTE_FLOW_CONV_ITEM_MASK &&
517                      ((spec.raw->length & mask.raw->length) >=
518                       (last.raw->length & mask.raw->length))))
519                         tmp = spec.raw->length & mask.raw->length;
520                 else
521                         tmp = last.raw->length & mask.raw->length;
522                 if (tmp) {
523                         off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
524                         if (size >= off + tmp)
525                                 dst.raw->pattern = rte_memcpy
526                                         ((void *)((uintptr_t)dst.raw + off),
527                                          src.raw->pattern, tmp);
528                         off += tmp;
529                 }
530                 break;
531         default:
532                 /*
533                  * Allow PMD-private (negative) flow item types.
534                  */
535                 off = (int)item->type >= 0 ?
536                       rte_flow_desc_item[item->type].size : sizeof(void *);
537                 rte_memcpy(buf, data, (size > off ? off : size));
538                 break;
539         }
540         return off;
541 }
542
543 /**
544  * Copy action configuration.
545  *
546  * @param[out] buf
547  *   Output buffer. Can be NULL if @p size is zero.
548  * @param size
549  *   Size of @p buf in bytes.
550  * @param[in] action
551  *   Action to copy configuration from.
552  *
553  * @return
554  *   Number of bytes needed to store action configuration regardless
555  *   of @p size. @p buf contents are truncated to @p size if not large
556  *   enough.
557  */
558 static size_t
559 rte_flow_conv_action_conf(void *buf, const size_t size,
560                           const struct rte_flow_action *action)
561 {
562         size_t off;
563
564         switch (action->type) {
565                 union {
566                         const struct rte_flow_action_rss *rss;
567                         const struct rte_flow_action_vxlan_encap *vxlan_encap;
568                         const struct rte_flow_action_nvgre_encap *nvgre_encap;
569                 } src;
570                 union {
571                         struct rte_flow_action_rss *rss;
572                         struct rte_flow_action_vxlan_encap *vxlan_encap;
573                         struct rte_flow_action_nvgre_encap *nvgre_encap;
574                 } dst;
575                 size_t tmp;
576                 int ret;
577
578         case RTE_FLOW_ACTION_TYPE_RSS:
579                 src.rss = action->conf;
580                 dst.rss = buf;
581                 rte_memcpy(dst.rss,
582                            (&(struct rte_flow_action_rss){
583                                 .func = src.rss->func,
584                                 .level = src.rss->level,
585                                 .types = src.rss->types,
586                                 .key_len = src.rss->key_len,
587                                 .queue_num = src.rss->queue_num,
588                            }),
589                            size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
590                 off = sizeof(*dst.rss);
591                 if (src.rss->key_len && src.rss->key) {
592                         off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
593                         tmp = sizeof(*src.rss->key) * src.rss->key_len;
594                         if (size >= off + tmp)
595                                 dst.rss->key = rte_memcpy
596                                         ((void *)((uintptr_t)dst.rss + off),
597                                          src.rss->key, tmp);
598                         off += tmp;
599                 }
600                 if (src.rss->queue_num) {
601                         off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
602                         tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
603                         if (size >= off + tmp)
604                                 dst.rss->queue = rte_memcpy
605                                         ((void *)((uintptr_t)dst.rss + off),
606                                          src.rss->queue, tmp);
607                         off += tmp;
608                 }
609                 break;
610         case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
611         case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
612                 src.vxlan_encap = action->conf;
613                 dst.vxlan_encap = buf;
614                 RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
615                                  sizeof(*src.nvgre_encap) ||
616                                  offsetof(struct rte_flow_action_vxlan_encap,
617                                           definition) !=
618                                  offsetof(struct rte_flow_action_nvgre_encap,
619                                           definition));
620                 off = sizeof(*dst.vxlan_encap);
621                 if (src.vxlan_encap->definition) {
622                         off = RTE_ALIGN_CEIL
623                                 (off, sizeof(*dst.vxlan_encap->definition));
624                         ret = rte_flow_conv
625                                 (RTE_FLOW_CONV_OP_PATTERN,
626                                  (void *)((uintptr_t)dst.vxlan_encap + off),
627                                  size > off ? size - off : 0,
628                                  src.vxlan_encap->definition, NULL);
629                         if (ret < 0)
630                                 return 0;
631                         if (size >= off + ret)
632                                 dst.vxlan_encap->definition =
633                                         (void *)((uintptr_t)dst.vxlan_encap +
634                                                  off);
635                         off += ret;
636                 }
637                 break;
638         default:
639                 /*
640                  * Allow PMD-private (negative) flow action types.
641                  */
642                 off = (int)action->type >= 0 ?
643                       rte_flow_desc_action[action->type].size : sizeof(void *);
644                 rte_memcpy(buf, action->conf, (size > off ? off : size));
645                 break;
646         }
647         return off;
648 }
649
650 /**
651  * Copy a list of pattern items.
652  *
653  * @param[out] dst
654  *   Destination buffer. Can be NULL if @p size is zero.
655  * @param size
656  *   Size of @p dst in bytes.
657  * @param[in] src
658  *   Source pattern items.
659  * @param num
660  *   Maximum number of pattern items to process from @p src or 0 to process
661  *   the entire list. In both cases, processing stops after
662  *   RTE_FLOW_ITEM_TYPE_END is encountered.
663  * @param[out] error
664  *   Perform verbose error reporting if not NULL.
665  *
666  * @return
667  *   A positive value representing the number of bytes needed to store
668  *   pattern items regardless of @p size on success (@p dst contents are
669  *   truncated to @p size if not large enough), a negative errno value
670  *   otherwise and rte_errno is set.
671  */
672 static int
673 rte_flow_conv_pattern(struct rte_flow_item *dst,
674                       const size_t size,
675                       const struct rte_flow_item *src,
676                       unsigned int num,
677                       struct rte_flow_error *error)
678 {
679         uintptr_t data = (uintptr_t)dst;
680         size_t off;
681         size_t ret;
682         unsigned int i;
683
684         for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
685                 /*
686                  * Allow PMD-private (negative) flow item types.
687                  */
688                 if (((int)src->type >= 0) &&
689                         ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
690                     !rte_flow_desc_item[src->type].name))
691                         return rte_flow_error_set
692                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
693                                  "cannot convert unknown item type");
694                 if (size >= off + sizeof(*dst))
695                         *dst = (struct rte_flow_item){
696                                 .type = src->type,
697                         };
698                 off += sizeof(*dst);
699                 if (!src->type)
700                         num = i + 1;
701         }
702         num = i;
703         src -= num;
704         dst -= num;
705         do {
706                 if (src->spec) {
707                         off = RTE_ALIGN_CEIL(off, sizeof(double));
708                         ret = rte_flow_conv_item_spec
709                                 ((void *)(data + off),
710                                  size > off ? size - off : 0, src,
711                                  RTE_FLOW_CONV_ITEM_SPEC);
712                         if (size && size >= off + ret)
713                                 dst->spec = (void *)(data + off);
714                         off += ret;
715
716                 }
717                 if (src->last) {
718                         off = RTE_ALIGN_CEIL(off, sizeof(double));
719                         ret = rte_flow_conv_item_spec
720                                 ((void *)(data + off),
721                                  size > off ? size - off : 0, src,
722                                  RTE_FLOW_CONV_ITEM_LAST);
723                         if (size && size >= off + ret)
724                                 dst->last = (void *)(data + off);
725                         off += ret;
726                 }
727                 if (src->mask) {
728                         off = RTE_ALIGN_CEIL(off, sizeof(double));
729                         ret = rte_flow_conv_item_spec
730                                 ((void *)(data + off),
731                                  size > off ? size - off : 0, src,
732                                  RTE_FLOW_CONV_ITEM_MASK);
733                         if (size && size >= off + ret)
734                                 dst->mask = (void *)(data + off);
735                         off += ret;
736                 }
737                 ++src;
738                 ++dst;
739         } while (--num);
740         return off;
741 }
742
743 /**
744  * Copy a list of actions.
745  *
746  * @param[out] dst
747  *   Destination buffer. Can be NULL if @p size is zero.
748  * @param size
749  *   Size of @p dst in bytes.
750  * @param[in] src
751  *   Source actions.
752  * @param num
753  *   Maximum number of actions to process from @p src or 0 to process the
754  *   entire list. In both cases, processing stops after
755  *   RTE_FLOW_ACTION_TYPE_END is encountered.
756  * @param[out] error
757  *   Perform verbose error reporting if not NULL.
758  *
759  * @return
760  *   A positive value representing the number of bytes needed to store
761  *   actions regardless of @p size on success (@p dst contents are truncated
762  *   to @p size if not large enough), a negative errno value otherwise and
763  *   rte_errno is set.
764  */
765 static int
766 rte_flow_conv_actions(struct rte_flow_action *dst,
767                       const size_t size,
768                       const struct rte_flow_action *src,
769                       unsigned int num,
770                       struct rte_flow_error *error)
771 {
772         uintptr_t data = (uintptr_t)dst;
773         size_t off;
774         size_t ret;
775         unsigned int i;
776
777         for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
778                 /*
779                  * Allow PMD-private (negative) flow action types.
780                  */
781                 if (((int)src->type >= 0) &&
782                     ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
783                     !rte_flow_desc_action[src->type].name))
784                         return rte_flow_error_set
785                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
786                                  src, "cannot convert unknown action type");
787                 if (size >= off + sizeof(*dst))
788                         *dst = (struct rte_flow_action){
789                                 .type = src->type,
790                         };
791                 off += sizeof(*dst);
792                 if (!src->type)
793                         num = i + 1;
794         }
795         num = i;
796         src -= num;
797         dst -= num;
798         do {
799                 if (src->conf) {
800                         off = RTE_ALIGN_CEIL(off, sizeof(double));
801                         ret = rte_flow_conv_action_conf
802                                 ((void *)(data + off),
803                                  size > off ? size - off : 0, src);
804                         if (size && size >= off + ret)
805                                 dst->conf = (void *)(data + off);
806                         off += ret;
807                 }
808                 ++src;
809                 ++dst;
810         } while (--num);
811         return off;
812 }
813
814 /**
815  * Copy flow rule components.
816  *
817  * This comprises the flow rule descriptor itself, attributes, pattern and
818  * actions list. NULL components in @p src are skipped.
819  *
820  * @param[out] dst
821  *   Destination buffer. Can be NULL if @p size is zero.
822  * @param size
823  *   Size of @p dst in bytes.
824  * @param[in] src
825  *   Source flow rule descriptor.
826  * @param[out] error
827  *   Perform verbose error reporting if not NULL.
828  *
829  * @return
830  *   A positive value representing the number of bytes needed to store all
831  *   components including the descriptor regardless of @p size on success
832  *   (@p dst contents are truncated to @p size if not large enough), a
833  *   negative errno value otherwise and rte_errno is set.
834  */
835 static int
836 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
837                    const size_t size,
838                    const struct rte_flow_conv_rule *src,
839                    struct rte_flow_error *error)
840 {
841         size_t off;
842         int ret;
843
844         rte_memcpy(dst,
845                    (&(struct rte_flow_conv_rule){
846                         .attr = NULL,
847                         .pattern = NULL,
848                         .actions = NULL,
849                    }),
850                    size > sizeof(*dst) ? sizeof(*dst) : size);
851         off = sizeof(*dst);
852         if (src->attr_ro) {
853                 off = RTE_ALIGN_CEIL(off, sizeof(double));
854                 if (size && size >= off + sizeof(*dst->attr))
855                         dst->attr = rte_memcpy
856                                 ((void *)((uintptr_t)dst + off),
857                                  src->attr_ro, sizeof(*dst->attr));
858                 off += sizeof(*dst->attr);
859         }
860         if (src->pattern_ro) {
861                 off = RTE_ALIGN_CEIL(off, sizeof(double));
862                 ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
863                                             size > off ? size - off : 0,
864                                             src->pattern_ro, 0, error);
865                 if (ret < 0)
866                         return ret;
867                 if (size && size >= off + (size_t)ret)
868                         dst->pattern = (void *)((uintptr_t)dst + off);
869                 off += ret;
870         }
871         if (src->actions_ro) {
872                 off = RTE_ALIGN_CEIL(off, sizeof(double));
873                 ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
874                                             size > off ? size - off : 0,
875                                             src->actions_ro, 0, error);
876                 if (ret < 0)
877                         return ret;
878                 if (size >= off + (size_t)ret)
879                         dst->actions = (void *)((uintptr_t)dst + off);
880                 off += ret;
881         }
882         return off;
883 }
884
885 /**
886  * Retrieve the name of a pattern item/action type.
887  *
888  * @param is_action
889  *   Nonzero when @p src represents an action type instead of a pattern item
890  *   type.
891  * @param is_ptr
892  *   Nonzero to write string address instead of contents into @p dst.
893  * @param[out] dst
894  *   Destination buffer. Can be NULL if @p size is zero.
895  * @param size
896  *   Size of @p dst in bytes.
897  * @param[in] src
898  *   Depending on @p is_action, source pattern item or action type cast as a
899  *   pointer.
900  * @param[out] error
901  *   Perform verbose error reporting if not NULL.
902  *
903  * @return
904  *   A positive value representing the number of bytes needed to store the
905  *   name or its address regardless of @p size on success (@p dst contents
906  *   are truncated to @p size if not large enough), a negative errno value
907  *   otherwise and rte_errno is set.
908  */
909 static int
910 rte_flow_conv_name(int is_action,
911                    int is_ptr,
912                    char *dst,
913                    const size_t size,
914                    const void *src,
915                    struct rte_flow_error *error)
916 {
917         struct desc_info {
918                 const struct rte_flow_desc_data *data;
919                 size_t num;
920         };
921         static const struct desc_info info_rep[2] = {
922                 { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
923                 { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
924         };
925         const struct desc_info *const info = &info_rep[!!is_action];
926         unsigned int type = (uintptr_t)src;
927
928         if (type >= info->num)
929                 return rte_flow_error_set
930                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
931                          "unknown object type to retrieve the name of");
932         if (!is_ptr)
933                 return strlcpy(dst, info->data[type].name, size);
934         if (size >= sizeof(const char **))
935                 *((const char **)dst) = info->data[type].name;
936         return sizeof(const char **);
937 }
938
939 /** Helper function to convert flow API objects. */
940 int
941 rte_flow_conv(enum rte_flow_conv_op op,
942               void *dst,
943               size_t size,
944               const void *src,
945               struct rte_flow_error *error)
946 {
947         switch (op) {
948                 const struct rte_flow_attr *attr;
949
950         case RTE_FLOW_CONV_OP_NONE:
951                 return 0;
952         case RTE_FLOW_CONV_OP_ATTR:
953                 attr = src;
954                 if (size > sizeof(*attr))
955                         size = sizeof(*attr);
956                 rte_memcpy(dst, attr, size);
957                 return sizeof(*attr);
958         case RTE_FLOW_CONV_OP_ITEM:
959                 return rte_flow_conv_pattern(dst, size, src, 1, error);
960         case RTE_FLOW_CONV_OP_ACTION:
961                 return rte_flow_conv_actions(dst, size, src, 1, error);
962         case RTE_FLOW_CONV_OP_PATTERN:
963                 return rte_flow_conv_pattern(dst, size, src, 0, error);
964         case RTE_FLOW_CONV_OP_ACTIONS:
965                 return rte_flow_conv_actions(dst, size, src, 0, error);
966         case RTE_FLOW_CONV_OP_RULE:
967                 return rte_flow_conv_rule(dst, size, src, error);
968         case RTE_FLOW_CONV_OP_ITEM_NAME:
969                 return rte_flow_conv_name(0, 0, dst, size, src, error);
970         case RTE_FLOW_CONV_OP_ACTION_NAME:
971                 return rte_flow_conv_name(1, 0, dst, size, src, error);
972         case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
973                 return rte_flow_conv_name(0, 1, dst, size, src, error);
974         case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
975                 return rte_flow_conv_name(1, 1, dst, size, src, error);
976         }
977         return rte_flow_error_set
978                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
979                  "unknown object conversion operation");
980 }
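
/*
 * Illustrative sketch (not part of the original file): the usual two-pass
 * use of rte_flow_conv() - a NULL destination with size 0 only reports
 * the number of bytes needed, a second call performs the copy.  The
 * helper name and the caller-provided buffer are hypothetical.
 */
static __rte_unused int
example_copy_rule(void *buf, size_t buf_size,
                  const struct rte_flow_conv_rule *rule,
                  struct rte_flow_error *error)
{
        int len;

        /* First pass: compute the required size. */
        len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, rule, error);
        if (len < 0)
                return len;
        if ((size_t)len > buf_size)
                return -ENOMEM;
        /* Second pass: copy attr, pattern and actions into buf. */
        return rte_flow_conv(RTE_FLOW_CONV_OP_RULE, buf, buf_size, rule, error);
}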
981
982 /** Store a full rte_flow description. */
983 size_t
984 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
985               const struct rte_flow_attr *attr,
986               const struct rte_flow_item *items,
987               const struct rte_flow_action *actions)
988 {
989         /*
990          * Overlap struct rte_flow_conv with struct rte_flow_desc in order
991          * to convert the former to the latter without wasting space.
992          */
993         struct rte_flow_conv_rule *dst =
994                 len ?
995                 (void *)((uintptr_t)desc +
996                          (offsetof(struct rte_flow_desc, actions) -
997                           offsetof(struct rte_flow_conv_rule, actions))) :
998                 NULL;
999         size_t dst_size =
1000                 len > sizeof(*desc) - sizeof(*dst) ?
1001                 len - (sizeof(*desc) - sizeof(*dst)) :
1002                 0;
1003         struct rte_flow_conv_rule src = {
1004                 .attr_ro = NULL,
1005                 .pattern_ro = items,
1006                 .actions_ro = actions,
1007         };
1008         int ret;
1009
1010         RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
1011                          sizeof(struct rte_flow_conv_rule));
1012         if (dst_size &&
1013             (&dst->pattern != &desc->items ||
1014              &dst->actions != &desc->actions ||
1015              (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
1016                 rte_errno = EINVAL;
1017                 return 0;
1018         }
1019         ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
1020         if (ret < 0)
1021                 return 0;
1022         ret += sizeof(*desc) - sizeof(*dst);
1023         rte_memcpy(desc,
1024                    (&(struct rte_flow_desc){
1025                         .size = ret,
1026                         .attr = *attr,
1027                         .items = dst_size ? dst->pattern : NULL,
1028                         .actions = dst_size ? dst->actions : NULL,
1029                    }),
1030                    len > sizeof(*desc) ? sizeof(*desc) : len);
1031         return ret;
1032 }
1033
1034 int
1035 rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
1036                         FILE *file, struct rte_flow_error *error)
1037 {
1038         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1039         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1040         int ret;
1041
1042         if (unlikely(!ops))
1043                 return -rte_errno;
1044         if (likely(!!ops->dev_dump)) {
1045                 fts_enter(dev);
1046                 ret = ops->dev_dump(dev, flow, file, error);
1047                 fts_exit(dev);
1048                 return flow_err(port_id, ret, error);
1049         }
1050         return rte_flow_error_set(error, ENOSYS,
1051                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1052                                   NULL, rte_strerror(ENOSYS));
1053 }
1054
1055 int
1056 rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
1057                     uint32_t nb_contexts, struct rte_flow_error *error)
1058 {
1059         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1060         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1061         int ret;
1062
1063         if (unlikely(!ops))
1064                 return -rte_errno;
1065         if (likely(!!ops->get_aged_flows)) {
1066                 fts_enter(dev);
1067                 ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
1068                 fts_exit(dev);
1069                 return flow_err(port_id, ret, error);
1070         }
1071         return rte_flow_error_set(error, ENOTSUP,
1072                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1073                                   NULL, rte_strerror(ENOTSUP));
1074 }
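
/*
 * Illustrative sketch (not part of the original file): retrieving aged-out
 * flows in two steps - first with nb_contexts == 0 to learn how many
 * contexts are pending, then with a caller-provided array.  The helper
 * name and buffer handling are hypothetical.
 */
static __rte_unused int
example_drain_aged_flows(uint16_t port_id, void **contexts, uint32_t max,
                         struct rte_flow_error *error)
{
        int nb;

        /* nb_contexts == 0: only report how many aged-out flows exist. */
        nb = rte_flow_get_aged_flows(port_id, NULL, 0, error);
        if (nb <= 0)
                return nb;
        if ((uint32_t)nb > max)
                nb = max;
        return rte_flow_get_aged_flows(port_id, contexts, nb, error);
}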
1075
1076 struct rte_flow_action_handle *
1077 rte_flow_action_handle_create(uint16_t port_id,
1078                               const struct rte_flow_indir_action_conf *conf,
1079                               const struct rte_flow_action *action,
1080                               struct rte_flow_error *error)
1081 {
1082         struct rte_flow_action_handle *handle;
1083         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1084
1085         if (unlikely(!ops))
1086                 return NULL;
1087         if (unlikely(!ops->action_handle_create)) {
1088                 rte_flow_error_set(error, ENOSYS,
1089                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1090                                    rte_strerror(ENOSYS));
1091                 return NULL;
1092         }
1093         handle = ops->action_handle_create(&rte_eth_devices[port_id],
1094                                            conf, action, error);
1095         if (handle == NULL)
1096                 flow_err(port_id, -rte_errno, error);
1097         return handle;
1098 }
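
/*
 * Illustrative sketch (not part of the original file): creating an
 * indirect (shared) action once and referencing it from flow rules via
 * RTE_FLOW_ACTION_TYPE_INDIRECT.  The choice of a COUNT action and the
 * helper name are hypothetical.
 */
static __rte_unused struct rte_flow_action_handle *
example_shared_count(uint16_t port_id, struct rte_flow_error *error)
{
        const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        const struct rte_flow_action_count count = { 0 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
                .conf = &count,
        };

        /*
         * The returned handle can be used in any number of rules as
         * { .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle } and
         * must eventually be released with rte_flow_action_handle_destroy().
         */
        return rte_flow_action_handle_create(port_id, &conf, &action, error);
}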
1099
1100 int
1101 rte_flow_action_handle_destroy(uint16_t port_id,
1102                                struct rte_flow_action_handle *handle,
1103                                struct rte_flow_error *error)
1104 {
1105         int ret;
1106         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1107
1108         if (unlikely(!ops))
1109                 return -rte_errno;
1110         if (unlikely(!ops->action_handle_destroy))
1111                 return rte_flow_error_set(error, ENOSYS,
1112                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1113                                           NULL, rte_strerror(ENOSYS));
1114         ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
1115                                          handle, error);
1116         return flow_err(port_id, ret, error);
1117 }
1118
1119 int
1120 rte_flow_action_handle_update(uint16_t port_id,
1121                               struct rte_flow_action_handle *handle,
1122                               const void *update,
1123                               struct rte_flow_error *error)
1124 {
1125         int ret;
1126         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1127
1128         if (unlikely(!ops))
1129                 return -rte_errno;
1130         if (unlikely(!ops->action_handle_update))
1131                 return rte_flow_error_set(error, ENOSYS,
1132                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1133                                           NULL, rte_strerror(ENOSYS));
1134         ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
1135                                         update, error);
1136         return flow_err(port_id, ret, error);
1137 }
1138
1139 int
1140 rte_flow_action_handle_query(uint16_t port_id,
1141                              const struct rte_flow_action_handle *handle,
1142                              void *data,
1143                              struct rte_flow_error *error)
1144 {
1145         int ret;
1146         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1147
1148         if (unlikely(!ops))
1149                 return -rte_errno;
1150         if (unlikely(!ops->action_handle_query))
1151                 return rte_flow_error_set(error, ENOSYS,
1152                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1153                                           NULL, rte_strerror(ENOSYS));
1154         ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
1155                                        data, error);
1156         return flow_err(port_id, ret, error);
1157 }
1158
1159 int
1160 rte_flow_tunnel_decap_set(uint16_t port_id,
1161                           struct rte_flow_tunnel *tunnel,
1162                           struct rte_flow_action **actions,
1163                           uint32_t *num_of_actions,
1164                           struct rte_flow_error *error)
1165 {
1166         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1167         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1168
1169         if (unlikely(!ops))
1170                 return -rte_errno;
1171         if (likely(!!ops->tunnel_decap_set)) {
1172                 return flow_err(port_id,
1173                                 ops->tunnel_decap_set(dev, tunnel, actions,
1174                                                       num_of_actions, error),
1175                                 error);
1176         }
1177         return rte_flow_error_set(error, ENOTSUP,
1178                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1179                                   NULL, rte_strerror(ENOTSUP));
1180 }
1181
1182 int
1183 rte_flow_tunnel_match(uint16_t port_id,
1184                       struct rte_flow_tunnel *tunnel,
1185                       struct rte_flow_item **items,
1186                       uint32_t *num_of_items,
1187                       struct rte_flow_error *error)
1188 {
1189         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1190         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1191
1192         if (unlikely(!ops))
1193                 return -rte_errno;
1194         if (likely(!!ops->tunnel_match)) {
1195                 return flow_err(port_id,
1196                                 ops->tunnel_match(dev, tunnel, items,
1197                                                   num_of_items, error),
1198                                 error);
1199         }
1200         return rte_flow_error_set(error, ENOTSUP,
1201                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1202                                   NULL, rte_strerror(ENOTSUP));
1203 }
1204
1205 int
1206 rte_flow_get_restore_info(uint16_t port_id,
1207                           struct rte_mbuf *m,
1208                           struct rte_flow_restore_info *restore_info,
1209                           struct rte_flow_error *error)
1210 {
1211         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1212         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1213
1214         if (unlikely(!ops))
1215                 return -rte_errno;
1216         if (likely(!!ops->get_restore_info)) {
1217                 return flow_err(port_id,
1218                                 ops->get_restore_info(dev, m, restore_info,
1219                                                       error),
1220                                 error);
1221         }
1222         return rte_flow_error_set(error, ENOTSUP,
1223                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1224                                   NULL, rte_strerror(ENOTSUP));
1225 }
1226
1227 int
1228 rte_flow_tunnel_action_decap_release(uint16_t port_id,
1229                                      struct rte_flow_action *actions,
1230                                      uint32_t num_of_actions,
1231                                      struct rte_flow_error *error)
1232 {
1233         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1234         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1235
1236         if (unlikely(!ops))
1237                 return -rte_errno;
1238         if (likely(!!ops->tunnel_action_decap_release)) {
1239                 return flow_err(port_id,
1240                                 ops->tunnel_action_decap_release(dev, actions,
1241                                                                  num_of_actions,
1242                                                                  error),
1243                                 error);
1244         }
1245         return rte_flow_error_set(error, ENOTSUP,
1246                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1247                                   NULL, rte_strerror(ENOTSUP));
1248 }
1249
1250 int
1251 rte_flow_tunnel_item_release(uint16_t port_id,
1252                              struct rte_flow_item *items,
1253                              uint32_t num_of_items,
1254                              struct rte_flow_error *error)
1255 {
1256         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1257         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1258
1259         if (unlikely(!ops))
1260                 return -rte_errno;
1261         if (likely(!!ops->tunnel_item_release)) {
1262                 return flow_err(port_id,
1263                                 ops->tunnel_item_release(dev, items,
1264                                                          num_of_items, error),
1265                                 error);
1266         }
1267         return rte_flow_error_set(error, ENOTSUP,
1268                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1269                                   NULL, rte_strerror(ENOTSUP));
1270 }