[dpdk.git] / ethdev / rte_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_string_fns.h>
15 #include <rte_mbuf.h>
16 #include <rte_mbuf_dyn.h>
17 #include "rte_ethdev.h"
18 #include "rte_flow_driver.h"
19 #include "rte_flow.h"
20
21 /* Mbuf dynamic field offset for metadata. */
22 int32_t rte_flow_dynf_metadata_offs = -1;
23
24 /* Mbuf dynamic flag mask for metadata. */
25 uint64_t rte_flow_dynf_metadata_mask;
26
27 /**
28  * Flow elements description tables.
29  */
30 struct rte_flow_desc_data {
31         const char *name;
32         size_t size;
33 };
34
35 /** Generate flow_item[] entry. */
36 #define MK_FLOW_ITEM(t, s) \
37         [RTE_FLOW_ITEM_TYPE_ ## t] = { \
38                 .name = # t, \
39                 .size = s, \
40         }
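
/*
 * For instance, MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)) expands
 * to a designated array initializer roughly equivalent to:
 *
 *   [RTE_FLOW_ITEM_TYPE_ETH] = {
 *           .name = "ETH",
 *           .size = sizeof(struct rte_flow_item_eth),
 *   },
 *
 * so the tables below can be indexed directly by the item/action type value.
 */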
41
42 /** Information about known flow pattern items. */
43 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
44         MK_FLOW_ITEM(END, 0),
45         MK_FLOW_ITEM(VOID, 0),
46         MK_FLOW_ITEM(INVERT, 0),
47         MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
48         MK_FLOW_ITEM(PF, 0),
49         MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
50         MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
51         MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
52         MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
53         MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
54         MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
55         MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
56         MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
57         MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
58         MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
59         MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
60         MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
61         MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
62         MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
63         MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
64         MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
65         MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
66         MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
67         MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
68         MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
69         MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
70         MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
71         MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
72         MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
73         MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
74         MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
75         MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
76         MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
77         MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
78         MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
79         MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
80         MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
81                      sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
82         MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
83                      sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
84         MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
85         MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
86         MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
87         MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
88         MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
89         MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
90         MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
91         MK_FLOW_ITEM(PPPOE_PROTO_ID,
92                         sizeof(struct rte_flow_item_pppoe_proto_id)),
93         MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
94         MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
95         MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
96         MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
97         MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
98         MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
99         MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
100         MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
101         MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
102         MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
103 };
104
105 /** Generate flow_action[] entry. */
106 #define MK_FLOW_ACTION(t, s) \
107         [RTE_FLOW_ACTION_TYPE_ ## t] = { \
108                 .name = # t, \
109                 .size = s, \
110         }
111
112 /** Information about known flow actions. */
113 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
114         MK_FLOW_ACTION(END, 0),
115         MK_FLOW_ACTION(VOID, 0),
116         MK_FLOW_ACTION(PASSTHRU, 0),
117         MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
118         MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
119         MK_FLOW_ACTION(FLAG, 0),
120         MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
121         MK_FLOW_ACTION(DROP, 0),
122         MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
123         MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
124         MK_FLOW_ACTION(PF, 0),
125         MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
126         MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
127         MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
128         MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
129         MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
130         MK_FLOW_ACTION(OF_SET_MPLS_TTL,
131                        sizeof(struct rte_flow_action_of_set_mpls_ttl)),
132         MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
133         MK_FLOW_ACTION(OF_SET_NW_TTL,
134                        sizeof(struct rte_flow_action_of_set_nw_ttl)),
135         MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
136         MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
137         MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
138         MK_FLOW_ACTION(OF_POP_VLAN, 0),
139         MK_FLOW_ACTION(OF_PUSH_VLAN,
140                        sizeof(struct rte_flow_action_of_push_vlan)),
141         MK_FLOW_ACTION(OF_SET_VLAN_VID,
142                        sizeof(struct rte_flow_action_of_set_vlan_vid)),
143         MK_FLOW_ACTION(OF_SET_VLAN_PCP,
144                        sizeof(struct rte_flow_action_of_set_vlan_pcp)),
145         MK_FLOW_ACTION(OF_POP_MPLS,
146                        sizeof(struct rte_flow_action_of_pop_mpls)),
147         MK_FLOW_ACTION(OF_PUSH_MPLS,
148                        sizeof(struct rte_flow_action_of_push_mpls)),
149         MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
150         MK_FLOW_ACTION(VXLAN_DECAP, 0),
151         MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
152         MK_FLOW_ACTION(NVGRE_DECAP, 0),
153         MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
154         MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
155         MK_FLOW_ACTION(SET_IPV4_SRC,
156                        sizeof(struct rte_flow_action_set_ipv4)),
157         MK_FLOW_ACTION(SET_IPV4_DST,
158                        sizeof(struct rte_flow_action_set_ipv4)),
159         MK_FLOW_ACTION(SET_IPV6_SRC,
160                        sizeof(struct rte_flow_action_set_ipv6)),
161         MK_FLOW_ACTION(SET_IPV6_DST,
162                        sizeof(struct rte_flow_action_set_ipv6)),
163         MK_FLOW_ACTION(SET_TP_SRC,
164                        sizeof(struct rte_flow_action_set_tp)),
165         MK_FLOW_ACTION(SET_TP_DST,
166                        sizeof(struct rte_flow_action_set_tp)),
167         MK_FLOW_ACTION(MAC_SWAP, 0),
168         MK_FLOW_ACTION(DEC_TTL, 0),
169         MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
170         MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
171         MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
172         MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
173         MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
174         MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
175         MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
176         MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
177         MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
178         MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
179         MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
180         MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
181         MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
182         MK_FLOW_ACTION(MODIFY_FIELD,
183                        sizeof(struct rte_flow_action_modify_field)),
184         /**
185          * Indirect action represented as handle of type
186          * (struct rte_flow_action_handle *) stored in conf field (see
187          * struct rte_flow_action); no need for additional structure to
188          * store indirect action handle.
189          */
190         MK_FLOW_ACTION(INDIRECT, 0),
191         MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
192 };
193
194 int
195 rte_flow_dynf_metadata_register(void)
196 {
197         int offset;
198         int flag;
199
200         static const struct rte_mbuf_dynfield desc_offs = {
201                 .name = RTE_MBUF_DYNFIELD_METADATA_NAME,
202                 .size = sizeof(uint32_t),
203                 .align = __alignof__(uint32_t),
204         };
205         static const struct rte_mbuf_dynflag desc_flag = {
206                 .name = RTE_MBUF_DYNFLAG_METADATA_NAME,
207         };
208
209         offset = rte_mbuf_dynfield_register(&desc_offs);
210         if (offset < 0)
211                 goto error;
212         flag = rte_mbuf_dynflag_register(&desc_flag);
213         if (flag < 0)
214                 goto error;
215         rte_flow_dynf_metadata_offs = offset;
216         rte_flow_dynf_metadata_mask = (1ULL << flag);
217         return 0;
218
219 error:
220         rte_flow_dynf_metadata_offs = -1;
221         rte_flow_dynf_metadata_mask = 0ULL;
222         return -rte_errno;
223 }
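
/*
 * Usage sketch (application side, not part of this file): the dynamic
 * metadata field is expected to be registered before the port starts, then
 * read from mbufs whose dynamic flag is set. handle_metadata() is a
 * hypothetical helper.
 *
 *   if (rte_flow_dynf_metadata_register() < 0)
 *           rte_panic("cannot register metadata dynfield: %s\n",
 *                     rte_strerror(rte_errno));
 *   ...
 *   // per received mbuf "m" on the datapath:
 *   if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *           handle_metadata(*RTE_FLOW_DYNF_METADATA(m));
 */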
224
225 static inline void
226 fts_enter(struct rte_eth_dev *dev)
227 {
228         if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
229                 pthread_mutex_lock(&dev->data->flow_ops_mutex);
230 }
231
232 static inline void
233 fts_exit(struct rte_eth_dev *dev)
234 {
235         if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
236                 pthread_mutex_unlock(&dev->data->flow_ops_mutex);
237 }
238
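/* Pass a driver's return value through unchanged, unless the port has been
 * physically removed in the meantime, in which case report EIO instead. */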
239 static int
240 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
241 {
242         if (ret == 0)
243                 return 0;
244         if (rte_eth_dev_is_removed(port_id))
245                 return rte_flow_error_set(error, EIO,
246                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
247                                           NULL, rte_strerror(EIO));
248         return ret;
249 }
250
251 /* Get generic flow operations structure from a port. */
252 const struct rte_flow_ops *
253 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
254 {
255         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
256         const struct rte_flow_ops *ops;
257         int code;
258
259         if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
260                 code = ENODEV;
261         else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
262                 /* flow API not supported with this driver dev_ops */
263                 code = ENOSYS;
264         else
265                 code = dev->dev_ops->flow_ops_get(dev, &ops);
266         if (code == 0 && ops == NULL)
267                 /* flow API not supported with this device */
268                 code = ENOSYS;
269
270         if (code != 0) {
271                 rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
272                                    NULL, rte_strerror(code));
273                 return NULL;
274         }
275         return ops;
276 }
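
/*
 * Driver-side sketch: the flow_ops_get dev_ops callback resolved above is
 * expected to hand back a constant ops table. Names prefixed with "mydrv_"
 * are illustrative only.
 *
 *   static const struct rte_flow_ops mydrv_flow_ops = {
 *           .validate = mydrv_flow_validate,
 *           .create = mydrv_flow_create,
 *           .destroy = mydrv_flow_destroy,
 *           .flush = mydrv_flow_flush,
 *   };
 *
 *   static int
 *   mydrv_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
 *                      const struct rte_flow_ops **ops)
 *   {
 *           *ops = &mydrv_flow_ops;
 *           return 0;
 *   }
 */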
277
278 /* Check whether a flow rule can be created on a given port. */
279 int
280 rte_flow_validate(uint16_t port_id,
281                   const struct rte_flow_attr *attr,
282                   const struct rte_flow_item pattern[],
283                   const struct rte_flow_action actions[],
284                   struct rte_flow_error *error)
285 {
286         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
287         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
288         int ret;
289
290         if (unlikely(!ops))
291                 return -rte_errno;
292         if (likely(!!ops->validate)) {
293                 fts_enter(dev);
294                 ret = ops->validate(dev, attr, pattern, actions, error);
295                 fts_exit(dev);
296                 return flow_err(port_id, ret, error);
297         }
298         return rte_flow_error_set(error, ENOSYS,
299                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
300                                   NULL, rte_strerror(ENOSYS));
301 }
302
303 /* Create a flow rule on a given port. */
304 struct rte_flow *
305 rte_flow_create(uint16_t port_id,
306                 const struct rte_flow_attr *attr,
307                 const struct rte_flow_item pattern[],
308                 const struct rte_flow_action actions[],
309                 struct rte_flow_error *error)
310 {
311         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
312         struct rte_flow *flow;
313         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
314
315         if (unlikely(!ops))
316                 return NULL;
317         if (likely(!!ops->create)) {
318                 fts_enter(dev);
319                 flow = ops->create(dev, attr, pattern, actions, error);
320                 fts_exit(dev);
321                 if (flow == NULL)
322                         flow_err(port_id, -rte_errno, error);
323                 return flow;
324         }
325         rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
326                            NULL, rte_strerror(ENOSYS));
327         return NULL;
328 }
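
/*
 * Usage sketch: a caller typically builds the attribute, pattern and action
 * arrays on the stack, validates them first and only then creates the rule.
 * port_id, the queue index and the error handling are placeholders.
 *
 *   struct rte_flow_error err;
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow *flow = NULL;
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *   if (flow == NULL)
 *           printf("flow rule rejected: %s\n",
 *                  err.message ? err.message : rte_strerror(rte_errno));
 */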
329
330 /* Destroy a flow rule on a given port. */
331 int
332 rte_flow_destroy(uint16_t port_id,
333                  struct rte_flow *flow,
334                  struct rte_flow_error *error)
335 {
336         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
337         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
338         int ret;
339
340         if (unlikely(!ops))
341                 return -rte_errno;
342         if (likely(!!ops->destroy)) {
343                 fts_enter(dev);
344                 ret = ops->destroy(dev, flow, error);
345                 fts_exit(dev);
346                 return flow_err(port_id, ret, error);
347         }
348         return rte_flow_error_set(error, ENOSYS,
349                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
350                                   NULL, rte_strerror(ENOSYS));
351 }
352
353 /* Destroy all flow rules associated with a port. */
354 int
355 rte_flow_flush(uint16_t port_id,
356                struct rte_flow_error *error)
357 {
358         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
359         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
360         int ret;
361
362         if (unlikely(!ops))
363                 return -rte_errno;
364         if (likely(!!ops->flush)) {
365                 fts_enter(dev);
366                 ret = ops->flush(dev, error);
367                 fts_exit(dev);
368                 return flow_err(port_id, ret, error);
369         }
370         return rte_flow_error_set(error, ENOSYS,
371                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
372                                   NULL, rte_strerror(ENOSYS));
373 }
374
375 /* Query an existing flow rule. */
376 int
377 rte_flow_query(uint16_t port_id,
378                struct rte_flow *flow,
379                const struct rte_flow_action *action,
380                void *data,
381                struct rte_flow_error *error)
382 {
383         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
384         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
385         int ret;
386
387         if (!ops)
388                 return -rte_errno;
389         if (likely(!!ops->query)) {
390                 fts_enter(dev);
391                 ret = ops->query(dev, flow, action, data, error);
392                 fts_exit(dev);
393                 return flow_err(port_id, ret, error);
394         }
395         return rte_flow_error_set(error, ENOSYS,
396                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
397                                   NULL, rte_strerror(ENOSYS));
398 }
399
400 /* Restrict ingress traffic to the defined flow rules. */
401 int
402 rte_flow_isolate(uint16_t port_id,
403                  int set,
404                  struct rte_flow_error *error)
405 {
406         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
407         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
408         int ret;
409
410         if (!ops)
411                 return -rte_errno;
412         if (likely(!!ops->isolate)) {
413                 fts_enter(dev);
414                 ret = ops->isolate(dev, set, error);
415                 fts_exit(dev);
416                 return flow_err(port_id, ret, error);
417         }
418         return rte_flow_error_set(error, ENOSYS,
419                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
420                                   NULL, rte_strerror(ENOSYS));
421 }
422
423 /* Initialize flow error structure. */
424 int
425 rte_flow_error_set(struct rte_flow_error *error,
426                    int code,
427                    enum rte_flow_error_type type,
428                    const void *cause,
429                    const char *message)
430 {
431         if (error) {
432                 *error = (struct rte_flow_error){
433                         .type = type,
434                         .cause = cause,
435                         .message = message,
436                 };
437         }
438         rte_errno = code;
439         return -code;
440 }
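
/*
 * Usage sketch: a PMD callback reports an unsupported pattern item by
 * pointing "cause" at the offending object; the function sets rte_errno to
 * the positive errno value and returns its negation, matching the return
 * convention of the rte_flow entry points above.
 *
 *   return rte_flow_error_set(error, ENOTSUP,
 *                             RTE_FLOW_ERROR_TYPE_ITEM, item,
 *                             "item type not supported by this PMD");
 */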
441
442 /** Pattern item specification types. */
443 enum rte_flow_conv_item_spec_type {
444         RTE_FLOW_CONV_ITEM_SPEC,
445         RTE_FLOW_CONV_ITEM_LAST,
446         RTE_FLOW_CONV_ITEM_MASK,
447 };
448
449 /**
450  * Copy pattern item specification.
451  *
452  * @param[out] buf
453  *   Output buffer. Can be NULL if @p size is zero.
454  * @param size
455  *   Size of @p buf in bytes.
456  * @param[in] item
457  *   Pattern item to copy specification from.
458  * @param type
459  *   Specification selector for either @p spec, @p last or @p mask.
460  *
461  * @return
462  *   Number of bytes needed to store pattern item specification regardless
463  *   of @p size. @p buf contents are truncated to @p size if not large
464  *   enough.
465  */
466 static size_t
467 rte_flow_conv_item_spec(void *buf, const size_t size,
468                         const struct rte_flow_item *item,
469                         enum rte_flow_conv_item_spec_type type)
470 {
471         size_t off;
472         const void *data =
473                 type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
474                 type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
475                 type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
476                 NULL;
477
478         switch (item->type) {
479                 union {
480                         const struct rte_flow_item_raw *raw;
481                 } spec;
482                 union {
483                         const struct rte_flow_item_raw *raw;
484                 } last;
485                 union {
486                         const struct rte_flow_item_raw *raw;
487                 } mask;
488                 union {
489                         const struct rte_flow_item_raw *raw;
490                 } src;
491                 union {
492                         struct rte_flow_item_raw *raw;
493                 } dst;
494                 size_t tmp;
495
496         case RTE_FLOW_ITEM_TYPE_RAW:
497                 spec.raw = item->spec;
498                 last.raw = item->last ? item->last : item->spec;
499                 mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
500                 src.raw = data;
501                 dst.raw = buf;
502                 rte_memcpy(dst.raw,
503                            (&(struct rte_flow_item_raw){
504                                 .relative = src.raw->relative,
505                                 .search = src.raw->search,
506                                 .reserved = src.raw->reserved,
507                                 .offset = src.raw->offset,
508                                 .limit = src.raw->limit,
509                                 .length = src.raw->length,
510                            }),
511                            size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
512                 off = sizeof(*dst.raw);
513                 if (type == RTE_FLOW_CONV_ITEM_SPEC ||
514                     (type == RTE_FLOW_CONV_ITEM_MASK &&
515                      ((spec.raw->length & mask.raw->length) >=
516                       (last.raw->length & mask.raw->length))))
517                         tmp = spec.raw->length & mask.raw->length;
518                 else
519                         tmp = last.raw->length & mask.raw->length;
520                 if (tmp) {
521                         off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
522                         if (size >= off + tmp)
523                                 dst.raw->pattern = rte_memcpy
524                                         ((void *)((uintptr_t)dst.raw + off),
525                                          src.raw->pattern, tmp);
526                         off += tmp;
527                 }
528                 break;
529         default:
530                 /*
531                  * Allow PMD private flow item; size unknown, use sizeof(void *).
532                  */
533                 off = (int)item->type >= 0 ?
534                       rte_flow_desc_item[item->type].size : sizeof(void *);
535                 rte_memcpy(buf, data, (size > off ? off : size));
536                 break;
537         }
538         return off;
539 }
540
541 /**
542  * Copy action configuration.
543  *
544  * @param[out] buf
545  *   Output buffer. Can be NULL if @p size is zero.
546  * @param size
547  *   Size of @p buf in bytes.
548  * @param[in] action
549  *   Action to copy configuration from.
550  *
551  * @return
552  *   Number of bytes needed to store action configuration regardless
553  *   of @p size. @p buf contents are truncated to @p size if not large
554  *   enough.
555  */
556 static size_t
557 rte_flow_conv_action_conf(void *buf, const size_t size,
558                           const struct rte_flow_action *action)
559 {
560         size_t off;
561
562         switch (action->type) {
563                 union {
564                         const struct rte_flow_action_rss *rss;
565                         const struct rte_flow_action_vxlan_encap *vxlan_encap;
566                         const struct rte_flow_action_nvgre_encap *nvgre_encap;
567                 } src;
568                 union {
569                         struct rte_flow_action_rss *rss;
570                         struct rte_flow_action_vxlan_encap *vxlan_encap;
571                         struct rte_flow_action_nvgre_encap *nvgre_encap;
572                 } dst;
573                 size_t tmp;
574                 int ret;
575
576         case RTE_FLOW_ACTION_TYPE_RSS:
577                 src.rss = action->conf;
578                 dst.rss = buf;
579                 rte_memcpy(dst.rss,
580                            (&(struct rte_flow_action_rss){
581                                 .func = src.rss->func,
582                                 .level = src.rss->level,
583                                 .types = src.rss->types,
584                                 .key_len = src.rss->key_len,
585                                 .queue_num = src.rss->queue_num,
586                            }),
587                            size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
588                 off = sizeof(*dst.rss);
589                 if (src.rss->key_len && src.rss->key) {
590                         off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
591                         tmp = sizeof(*src.rss->key) * src.rss->key_len;
592                         if (size >= off + tmp)
593                                 dst.rss->key = rte_memcpy
594                                         ((void *)((uintptr_t)dst.rss + off),
595                                          src.rss->key, tmp);
596                         off += tmp;
597                 }
598                 if (src.rss->queue_num) {
599                         off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
600                         tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
601                         if (size >= off + tmp)
602                                 dst.rss->queue = rte_memcpy
603                                         ((void *)((uintptr_t)dst.rss + off),
604                                          src.rss->queue, tmp);
605                         off += tmp;
606                 }
607                 break;
608         case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
609         case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
610                 src.vxlan_encap = action->conf;
611                 dst.vxlan_encap = buf;
612                 RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
613                                  sizeof(*src.nvgre_encap) ||
614                                  offsetof(struct rte_flow_action_vxlan_encap,
615                                           definition) !=
616                                  offsetof(struct rte_flow_action_nvgre_encap,
617                                           definition));
618                 off = sizeof(*dst.vxlan_encap);
619                 if (src.vxlan_encap->definition) {
620                         off = RTE_ALIGN_CEIL
621                                 (off, sizeof(*dst.vxlan_encap->definition));
622                         ret = rte_flow_conv
623                                 (RTE_FLOW_CONV_OP_PATTERN,
624                                  (void *)((uintptr_t)dst.vxlan_encap + off),
625                                  size > off ? size - off : 0,
626                                  src.vxlan_encap->definition, NULL);
627                         if (ret < 0)
628                                 return 0;
629                         if (size >= off + ret)
630                                 dst.vxlan_encap->definition =
631                                         (void *)((uintptr_t)dst.vxlan_encap +
632                                                  off);
633                         off += ret;
634                 }
635                 break;
636         default:
637                 /*
638                  * Allow PMD private flow action; conf size unknown, use sizeof(void *).
639                  */
640                 off = (int)action->type >= 0 ?
641                       rte_flow_desc_action[action->type].size : sizeof(void *);
642                 rte_memcpy(buf, action->conf, (size > off ? off : size));
643                 break;
644         }
645         return off;
646 }
647
648 /**
649  * Copy a list of pattern items.
650  *
651  * @param[out] dst
652  *   Destination buffer. Can be NULL if @p size is zero.
653  * @param size
654  *   Size of @p dst in bytes.
655  * @param[in] src
656  *   Source pattern items.
657  * @param num
658  *   Maximum number of pattern items to process from @p src or 0 to process
659  *   the entire list. In both cases, processing stops after
660  *   RTE_FLOW_ITEM_TYPE_END is encountered.
661  * @param[out] error
662  *   Perform verbose error reporting if not NULL.
663  *
664  * @return
665  *   A positive value representing the number of bytes needed to store
666  *   pattern items regardless of @p size on success (@p dst contents are
667  *   truncated to @p size if not large enough), a negative errno value
668  *   otherwise and rte_errno is set.
669  */
670 static int
671 rte_flow_conv_pattern(struct rte_flow_item *dst,
672                       const size_t size,
673                       const struct rte_flow_item *src,
674                       unsigned int num,
675                       struct rte_flow_error *error)
676 {
677         uintptr_t data = (uintptr_t)dst;
678         size_t off;
679         size_t ret;
680         unsigned int i;
681
682         for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
683                 /**
684                  * allow PMD private flow item
685                  */
686                 if (((int)src->type >= 0) &&
687                     ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
688                     !rte_flow_desc_item[src->type].name))
689                         return rte_flow_error_set
690                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
691                                  "cannot convert unknown item type");
692                 if (size >= off + sizeof(*dst))
693                         *dst = (struct rte_flow_item){
694                                 .type = src->type,
695                         };
696                 off += sizeof(*dst);
697                 if (!src->type)
698                         num = i + 1;
699         }
700         num = i;
701         src -= num;
702         dst -= num;
703         do {
704                 if (src->spec) {
705                         off = RTE_ALIGN_CEIL(off, sizeof(double));
706                         ret = rte_flow_conv_item_spec
707                                 ((void *)(data + off),
708                                  size > off ? size - off : 0, src,
709                                  RTE_FLOW_CONV_ITEM_SPEC);
710                         if (size && size >= off + ret)
711                                 dst->spec = (void *)(data + off);
712                         off += ret;
713
714                 }
715                 if (src->last) {
716                         off = RTE_ALIGN_CEIL(off, sizeof(double));
717                         ret = rte_flow_conv_item_spec
718                                 ((void *)(data + off),
719                                  size > off ? size - off : 0, src,
720                                  RTE_FLOW_CONV_ITEM_LAST);
721                         if (size && size >= off + ret)
722                                 dst->last = (void *)(data + off);
723                         off += ret;
724                 }
725                 if (src->mask) {
726                         off = RTE_ALIGN_CEIL(off, sizeof(double));
727                         ret = rte_flow_conv_item_spec
728                                 ((void *)(data + off),
729                                  size > off ? size - off : 0, src,
730                                  RTE_FLOW_CONV_ITEM_MASK);
731                         if (size && size >= off + ret)
732                                 dst->mask = (void *)(data + off);
733                         off += ret;
734                 }
735                 ++src;
736                 ++dst;
737         } while (--num);
738         return off;
739 }
740
741 /**
742  * Copy a list of actions.
743  *
744  * @param[out] dst
745  *   Destination buffer. Can be NULL if @p size is zero.
746  * @param size
747  *   Size of @p dst in bytes.
748  * @param[in] src
749  *   Source actions.
750  * @param num
751  *   Maximum number of actions to process from @p src or 0 to process the
752  *   entire list. In both cases, processing stops after
753  *   RTE_FLOW_ACTION_TYPE_END is encountered.
754  * @param[out] error
755  *   Perform verbose error reporting if not NULL.
756  *
757  * @return
758  *   A positive value representing the number of bytes needed to store
759  *   actions regardless of @p size on success (@p dst contents are truncated
760  *   to @p size if not large enough), a negative errno value otherwise and
761  *   rte_errno is set.
762  */
763 static int
764 rte_flow_conv_actions(struct rte_flow_action *dst,
765                       const size_t size,
766                       const struct rte_flow_action *src,
767                       unsigned int num,
768                       struct rte_flow_error *error)
769 {
770         uintptr_t data = (uintptr_t)dst;
771         size_t off;
772         size_t ret;
773         unsigned int i;
774
775         for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
776                 /**
777                  * allow PMD private flow action
778                  */
779                 if (((int)src->type >= 0) &&
780                     ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
781                     !rte_flow_desc_action[src->type].name))
782                         return rte_flow_error_set
783                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
784                                  src, "cannot convert unknown action type");
785                 if (size >= off + sizeof(*dst))
786                         *dst = (struct rte_flow_action){
787                                 .type = src->type,
788                         };
789                 off += sizeof(*dst);
790                 if (!src->type)
791                         num = i + 1;
792         }
793         num = i;
794         src -= num;
795         dst -= num;
796         do {
797                 if (src->conf) {
798                         off = RTE_ALIGN_CEIL(off, sizeof(double));
799                         ret = rte_flow_conv_action_conf
800                                 ((void *)(data + off),
801                                  size > off ? size - off : 0, src);
802                         if (size && size >= off + ret)
803                                 dst->conf = (void *)(data + off);
804                         off += ret;
805                 }
806                 ++src;
807                 ++dst;
808         } while (--num);
809         return off;
810 }
811
812 /**
813  * Copy flow rule components.
814  *
815  * This comprises the flow rule descriptor itself, attributes, pattern and
816  * actions list. NULL components in @p src are skipped.
817  *
818  * @param[out] dst
819  *   Destination buffer. Can be NULL if @p size is zero.
820  * @param size
821  *   Size of @p dst in bytes.
822  * @param[in] src
823  *   Source flow rule descriptor.
824  * @param[out] error
825  *   Perform verbose error reporting if not NULL.
826  *
827  * @return
828  *   A positive value representing the number of bytes needed to store all
829  *   components including the descriptor regardless of @p size on success
830  *   (@p dst contents are truncated to @p size if not large enough), a
831  *   negative errno value otherwise and rte_errno is set.
832  */
833 static int
834 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
835                    const size_t size,
836                    const struct rte_flow_conv_rule *src,
837                    struct rte_flow_error *error)
838 {
839         size_t off;
840         int ret;
841
842         rte_memcpy(dst,
843                    (&(struct rte_flow_conv_rule){
844                         .attr = NULL,
845                         .pattern = NULL,
846                         .actions = NULL,
847                    }),
848                    size > sizeof(*dst) ? sizeof(*dst) : size);
849         off = sizeof(*dst);
850         if (src->attr_ro) {
851                 off = RTE_ALIGN_CEIL(off, sizeof(double));
852                 if (size && size >= off + sizeof(*dst->attr))
853                         dst->attr = rte_memcpy
854                                 ((void *)((uintptr_t)dst + off),
855                                  src->attr_ro, sizeof(*dst->attr));
856                 off += sizeof(*dst->attr);
857         }
858         if (src->pattern_ro) {
859                 off = RTE_ALIGN_CEIL(off, sizeof(double));
860                 ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
861                                             size > off ? size - off : 0,
862                                             src->pattern_ro, 0, error);
863                 if (ret < 0)
864                         return ret;
865                 if (size && size >= off + (size_t)ret)
866                         dst->pattern = (void *)((uintptr_t)dst + off);
867                 off += ret;
868         }
869         if (src->actions_ro) {
870                 off = RTE_ALIGN_CEIL(off, sizeof(double));
871                 ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
872                                             size > off ? size - off : 0,
873                                             src->actions_ro, 0, error);
874                 if (ret < 0)
875                         return ret;
876                 if (size >= off + (size_t)ret)
877                         dst->actions = (void *)((uintptr_t)dst + off);
878                 off += ret;
879         }
880         return off;
881 }
882
883 /**
884  * Retrieve the name of a pattern item/action type.
885  *
886  * @param is_action
887  *   Nonzero when @p src represents an action type instead of a pattern item
888  *   type.
889  * @param is_ptr
890  *   Nonzero to write string address instead of contents into @p dst.
891  * @param[out] dst
892  *   Destination buffer. Can be NULL if @p size is zero.
893  * @param size
894  *   Size of @p dst in bytes.
895  * @param[in] src
896  *   Depending on @p is_action, source pattern item or action type cast as a
897  *   pointer.
898  * @param[out] error
899  *   Perform verbose error reporting if not NULL.
900  *
901  * @return
902  *   A positive value representing the number of bytes needed to store the
903  *   name or its address regardless of @p size on success (@p dst contents
904  *   are truncated to @p size if not large enough), a negative errno value
905  *   otherwise and rte_errno is set.
906  */
907 static int
908 rte_flow_conv_name(int is_action,
909                    int is_ptr,
910                    char *dst,
911                    const size_t size,
912                    const void *src,
913                    struct rte_flow_error *error)
914 {
915         struct desc_info {
916                 const struct rte_flow_desc_data *data;
917                 size_t num;
918         };
919         static const struct desc_info info_rep[2] = {
920                 { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
921                 { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
922         };
923         const struct desc_info *const info = &info_rep[!!is_action];
924         unsigned int type = (uintptr_t)src;
925
926         if (type >= info->num)
927                 return rte_flow_error_set
928                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
929                          "unknown object type to retrieve the name of");
930         if (!is_ptr)
931                 return strlcpy(dst, info->data[type].name, size);
932         if (size >= sizeof(const char **))
933                 *((const char **)dst) = info->data[type].name;
934         return sizeof(const char **);
935 }
936
937 /** Helper function to convert flow API objects. */
938 int
939 rte_flow_conv(enum rte_flow_conv_op op,
940               void *dst,
941               size_t size,
942               const void *src,
943               struct rte_flow_error *error)
944 {
945         switch (op) {
946                 const struct rte_flow_attr *attr;
947
948         case RTE_FLOW_CONV_OP_NONE:
949                 return 0;
950         case RTE_FLOW_CONV_OP_ATTR:
951                 attr = src;
952                 if (size > sizeof(*attr))
953                         size = sizeof(*attr);
954                 rte_memcpy(dst, attr, size);
955                 return sizeof(*attr);
956         case RTE_FLOW_CONV_OP_ITEM:
957                 return rte_flow_conv_pattern(dst, size, src, 1, error);
958         case RTE_FLOW_CONV_OP_ACTION:
959                 return rte_flow_conv_actions(dst, size, src, 1, error);
960         case RTE_FLOW_CONV_OP_PATTERN:
961                 return rte_flow_conv_pattern(dst, size, src, 0, error);
962         case RTE_FLOW_CONV_OP_ACTIONS:
963                 return rte_flow_conv_actions(dst, size, src, 0, error);
964         case RTE_FLOW_CONV_OP_RULE:
965                 return rte_flow_conv_rule(dst, size, src, error);
966         case RTE_FLOW_CONV_OP_ITEM_NAME:
967                 return rte_flow_conv_name(0, 0, dst, size, src, error);
968         case RTE_FLOW_CONV_OP_ACTION_NAME:
969                 return rte_flow_conv_name(1, 0, dst, size, src, error);
970         case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
971                 return rte_flow_conv_name(0, 1, dst, size, src, error);
972         case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
973                 return rte_flow_conv_name(1, 1, dst, size, src, error);
974         }
975         return rte_flow_error_set
976                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
977                  "unknown object conversion operation");
978 }
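
/*
 * Usage sketch: conversions are usually done in two passes, first with a
 * zero-sized destination to learn the required space, then into a buffer of
 * that size. attr, pattern, actions and err are assumed to exist in the
 * caller.
 *
 *   struct rte_flow_conv_rule rule = {
 *           .attr_ro = &attr,
 *           .pattern_ro = pattern,
 *           .actions_ro = actions,
 *   };
 *   int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *   if (len > 0) {
 *           struct rte_flow_conv_rule *copy = malloc(len);
 *
 *           if (copy != NULL &&
 *               rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len,
 *                             &rule, &err) < 0) {
 *                   free(copy);
 *                   copy = NULL;
 *           }
 *   }
 */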
979
980 /** Store a full rte_flow description. */
981 size_t
982 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
983               const struct rte_flow_attr *attr,
984               const struct rte_flow_item *items,
985               const struct rte_flow_action *actions)
986 {
987         /*
988          * Overlap struct rte_flow_conv with struct rte_flow_desc in order
989          * to convert the former to the latter without wasting space.
990          */
991         struct rte_flow_conv_rule *dst =
992                 len ?
993                 (void *)((uintptr_t)desc +
994                          (offsetof(struct rte_flow_desc, actions) -
995                           offsetof(struct rte_flow_conv_rule, actions))) :
996                 NULL;
997         size_t dst_size =
998                 len > sizeof(*desc) - sizeof(*dst) ?
999                 len - (sizeof(*desc) - sizeof(*dst)) :
1000                 0;
1001         struct rte_flow_conv_rule src = {
1002                 .attr_ro = NULL,
1003                 .pattern_ro = items,
1004                 .actions_ro = actions,
1005         };
1006         int ret;
1007
1008         RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
1009                          sizeof(struct rte_flow_conv_rule));
1010         if (dst_size &&
1011             (&dst->pattern != &desc->items ||
1012              &dst->actions != &desc->actions ||
1013              (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
1014                 rte_errno = EINVAL;
1015                 return 0;
1016         }
1017         ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
1018         if (ret < 0)
1019                 return 0;
1020         ret += sizeof(*desc) - sizeof(*dst);
1021         rte_memcpy(desc,
1022                    (&(struct rte_flow_desc){
1023                         .size = ret,
1024                         .attr = *attr,
1025                         .items = dst_size ? dst->pattern : NULL,
1026                         .actions = dst_size ? dst->actions : NULL,
1027                    }),
1028                    len > sizeof(*desc) ? sizeof(*desc) : len);
1029         return ret;
1030 }
1031
1032 int
1033 rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
1034                         FILE *file, struct rte_flow_error *error)
1035 {
1036         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1037         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1038         int ret;
1039
1040         if (unlikely(!ops))
1041                 return -rte_errno;
1042         if (likely(!!ops->dev_dump)) {
1043                 fts_enter(dev);
1044                 ret = ops->dev_dump(dev, flow, file, error);
1045                 fts_exit(dev);
1046                 return flow_err(port_id, ret, error);
1047         }
1048         return rte_flow_error_set(error, ENOSYS,
1049                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1050                                   NULL, rte_strerror(ENOSYS));
1051 }
1052
1053 int
1054 rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
1055                     uint32_t nb_contexts, struct rte_flow_error *error)
1056 {
1057         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1058         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1059         int ret;
1060
1061         if (unlikely(!ops))
1062                 return -rte_errno;
1063         if (likely(!!ops->get_aged_flows)) {
1064                 fts_enter(dev);
1065                 ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
1066                 fts_exit(dev);
1067                 return flow_err(port_id, ret, error);
1068         }
1069         return rte_flow_error_set(error, ENOTSUP,
1070                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1071                                   NULL, rte_strerror(ENOTSUP));
1072 }
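
/*
 * Usage sketch: flows created with an AGE action report expiry through this
 * call, typically from an RTE_ETH_EVENT_FLOW_AGED event callback. Passing
 * nb_contexts == 0 first returns how many aged contexts are pending;
 * expire_flow() is a hypothetical helper.
 *
 *   int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
 *
 *   if (n > 0) {
 *           void **ctx = calloc(n, sizeof(*ctx));
 *           int got = ctx ? rte_flow_get_aged_flows(port_id, ctx, n, &err)
 *                         : 0;
 *
 *           for (int i = 0; i < got; i++)
 *                   expire_flow(ctx[i]);
 *           free(ctx);
 *   }
 */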
1073
1074 struct rte_flow_action_handle *
1075 rte_flow_action_handle_create(uint16_t port_id,
1076                               const struct rte_flow_indir_action_conf *conf,
1077                               const struct rte_flow_action *action,
1078                               struct rte_flow_error *error)
1079 {
1080         struct rte_flow_action_handle *handle;
1081         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1082
1083         if (unlikely(!ops))
1084                 return NULL;
1085         if (unlikely(!ops->action_handle_create)) {
1086                 rte_flow_error_set(error, ENOSYS,
1087                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1088                                    rte_strerror(ENOSYS));
1089                 return NULL;
1090         }
1091         handle = ops->action_handle_create(&rte_eth_devices[port_id],
1092                                            conf, action, error);
1093         if (handle == NULL)
1094                 flow_err(port_id, -rte_errno, error);
1095         return handle;
1096 }
1097
1098 int
1099 rte_flow_action_handle_destroy(uint16_t port_id,
1100                                struct rte_flow_action_handle *handle,
1101                                struct rte_flow_error *error)
1102 {
1103         int ret;
1104         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1105
1106         if (unlikely(!ops))
1107                 return -rte_errno;
1108         if (unlikely(!ops->action_handle_destroy))
1109                 return rte_flow_error_set(error, ENOSYS,
1110                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1111                                           NULL, rte_strerror(ENOSYS));
1112         ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
1113                                          handle, error);
1114         return flow_err(port_id, ret, error);
1115 }
1116
1117 int
1118 rte_flow_action_handle_update(uint16_t port_id,
1119                               struct rte_flow_action_handle *handle,
1120                               const void *update,
1121                               struct rte_flow_error *error)
1122 {
1123         int ret;
1124         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1125
1126         if (unlikely(!ops))
1127                 return -rte_errno;
1128         if (unlikely(!ops->action_handle_update))
1129                 return rte_flow_error_set(error, ENOSYS,
1130                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1131                                           NULL, rte_strerror(ENOSYS));
1132         ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
1133                                         update, error);
1134         return flow_err(port_id, ret, error);
1135 }
1136
1137 int
1138 rte_flow_action_handle_query(uint16_t port_id,
1139                              const struct rte_flow_action_handle *handle,
1140                              void *data,
1141                              struct rte_flow_error *error)
1142 {
1143         int ret;
1144         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1145
1146         if (unlikely(!ops))
1147                 return -rte_errno;
1148         if (unlikely(!ops->action_handle_query))
1149                 return rte_flow_error_set(error, ENOSYS,
1150                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1151                                           NULL, rte_strerror(ENOSYS));
1152         ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
1153                                        data, error);
1154         return flow_err(port_id, ret, error);
1155 }
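
/*
 * Usage sketch: an indirect (shared) counter. The handle created here is
 * referenced as the conf of an RTE_FLOW_ACTION_TYPE_INDIRECT action in any
 * number of flow rules and can be queried or destroyed independently of
 * them. port_id and err are placeholders.
 *
 *   struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *   struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *   struct rte_flow_action_handle *handle =
 *           rte_flow_action_handle_create(port_id, &iconf, &count, &err);
 *
 *   // in a rule's action list:
 *   // { .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle }
 *
 *   struct rte_flow_query_count stats = { .reset = 1 };
 *   if (handle != NULL)
 *           rte_flow_action_handle_query(port_id, handle, &stats, &err);
 */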
1156
1157 int
1158 rte_flow_tunnel_decap_set(uint16_t port_id,
1159                           struct rte_flow_tunnel *tunnel,
1160                           struct rte_flow_action **actions,
1161                           uint32_t *num_of_actions,
1162                           struct rte_flow_error *error)
1163 {
1164         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1165         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1166
1167         if (unlikely(!ops))
1168                 return -rte_errno;
1169         if (likely(!!ops->tunnel_decap_set)) {
1170                 return flow_err(port_id,
1171                                 ops->tunnel_decap_set(dev, tunnel, actions,
1172                                                       num_of_actions, error),
1173                                 error);
1174         }
1175         return rte_flow_error_set(error, ENOTSUP,
1176                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1177                                   NULL, rte_strerror(ENOTSUP));
1178 }
1179
1180 int
1181 rte_flow_tunnel_match(uint16_t port_id,
1182                       struct rte_flow_tunnel *tunnel,
1183                       struct rte_flow_item **items,
1184                       uint32_t *num_of_items,
1185                       struct rte_flow_error *error)
1186 {
1187         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1188         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1189
1190         if (unlikely(!ops))
1191                 return -rte_errno;
1192         if (likely(!!ops->tunnel_match)) {
1193                 return flow_err(port_id,
1194                                 ops->tunnel_match(dev, tunnel, items,
1195                                                   num_of_items, error),
1196                                 error);
1197         }
1198         return rte_flow_error_set(error, ENOTSUP,
1199                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1200                                   NULL, rte_strerror(ENOTSUP));
1201 }
1202
1203 int
1204 rte_flow_get_restore_info(uint16_t port_id,
1205                           struct rte_mbuf *m,
1206                           struct rte_flow_restore_info *restore_info,
1207                           struct rte_flow_error *error)
1208 {
1209         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1210         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1211
1212         if (unlikely(!ops))
1213                 return -rte_errno;
1214         if (likely(!!ops->get_restore_info)) {
1215                 return flow_err(port_id,
1216                                 ops->get_restore_info(dev, m, restore_info,
1217                                                       error),
1218                                 error);
1219         }
1220         return rte_flow_error_set(error, ENOTSUP,
1221                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1222                                   NULL, rte_strerror(ENOTSUP));
1223 }
1224
1225 int
1226 rte_flow_tunnel_action_decap_release(uint16_t port_id,
1227                                      struct rte_flow_action *actions,
1228                                      uint32_t num_of_actions,
1229                                      struct rte_flow_error *error)
1230 {
1231         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1232         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1233
1234         if (unlikely(!ops))
1235                 return -rte_errno;
1236         if (likely(!!ops->tunnel_action_decap_release)) {
1237                 return flow_err(port_id,
1238                                 ops->tunnel_action_decap_release(dev, actions,
1239                                                                  num_of_actions,
1240                                                                  error),
1241                                 error);
1242         }
1243         return rte_flow_error_set(error, ENOTSUP,
1244                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1245                                   NULL, rte_strerror(ENOTSUP));
1246 }
1247
1248 int
1249 rte_flow_tunnel_item_release(uint16_t port_id,
1250                              struct rte_flow_item *items,
1251                              uint32_t num_of_items,
1252                              struct rte_flow_error *error)
1253 {
1254         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1255         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1256
1257         if (unlikely(!ops))
1258                 return -rte_errno;
1259         if (likely(!!ops->tunnel_item_release)) {
1260                 return flow_err(port_id,
1261                                 ops->tunnel_item_release(dev, items,
1262                                                          num_of_items, error),
1263                                 error);
1264         }
1265         return rte_flow_error_set(error, ENOTSUP,
1266                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1267                                   NULL, rte_strerror(ENOTSUP));
1268 }