/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"

/* Mbuf dynamic field offset for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = (1ULL << flag);
	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = 0ULL;
	return -rte_errno;
}
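
/*
 * Illustrative usage sketch (not part of the library): an application that
 * wants to attach 32-bit metadata to packets registers the dynamic mbuf
 * field once, then reads or writes it through the registered offset. The
 * helper name below is hypothetical; RTE_MBUF_DYNFIELD() comes from
 * rte_mbuf_dyn.h.
 *
 *	static int
 *	example_set_metadata(struct rte_mbuf *m, uint32_t value)
 *	{
 *		if (rte_flow_dynf_metadata_offs < 0 &&
 *		    rte_flow_dynf_metadata_register() < 0)
 *			return -rte_errno;
 *		*RTE_MBUF_DYNFIELD(m, rte_flow_dynf_metadata_offs,
 *				   uint32_t *) = value;
 *		m->ol_flags |= rte_flow_dynf_metadata_mask;
 *		return 0;
 *	}
 */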

/* Pass a driver return value through, reporting EIO instead when the
 * underlying device has been physically removed.
 */
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/*
 * Return the item type implied by the spec of the last pattern item
 * (e.g. an ETH item whose EtherType is IPv4 implies an IPV4 item),
 * or RTE_FLOW_ITEM_TYPE_VOID when nothing can be deduced.
 */
static enum rte_flow_item_type
rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
						(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
				(item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
						(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
				(item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					(item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				(item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
					(item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				(item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
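
/*
 * Illustrative usage sketch (not part of the library): validate and then
 * create a rule steering IPv4/UDP traffic to Rx queue 1. The helper name is
 * hypothetical; the pattern and action layout follows the API above.
 *
 *	static struct rte_flow *
 *	example_steer_udp(uint16_t port_id, struct rte_flow_error *err)
 *	{
 *		static const struct rte_flow_attr attr = { .ingress = 1 };
 *		static const struct rte_flow_item pattern[] = {
 *			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *			{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *			{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *			{ .type = RTE_FLOW_ITEM_TYPE_END },
 *		};
 *		static const struct rte_flow_action_queue queue = { .index = 1 };
 *		static const struct rte_flow_action actions[] = {
 *			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *			{ .type = RTE_FLOW_ACTION_TYPE_END },
 *		};
 *
 *		if (rte_flow_validate(port_id, &attr, pattern, actions, err))
 *			return NULL;
 *		return rte_flow_create(port_id, &attr, pattern, actions, err);
 *	}
 */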

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
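
/*
 * Illustrative usage sketch (not part of the library): read the counters of
 * a rule created with a COUNT action. The helper name is hypothetical; the
 * action passed to rte_flow_query() only selects which action of the rule
 * to query.
 *
 *	static int
 *	example_read_count(uint16_t port_id, struct rte_flow *flow,
 *			   struct rte_flow_query_count *count,
 *			   struct rte_flow_error *err)
 *	{
 *		const struct rte_flow_action action = {
 *			.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		};
 *
 *		return rte_flow_query(port_id, flow, &action, count, err);
 *	}
 */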

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}
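
/*
 * Illustrative usage sketch (not part of the library): rte_flow_conv() is
 * meant for two-pass use. A first call with a zero-sized buffer returns the
 * number of bytes needed, a second call fills the allocated buffer. The
 * helper name is hypothetical.
 *
 *	static struct rte_flow_conv_rule *
 *	example_dup_rule(const struct rte_flow_conv_rule *src,
 *			 struct rte_flow_error *err)
 *	{
 *		struct rte_flow_conv_rule *dst;
 *		int len;
 *
 *		len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, src, err);
 *		if (len < 0)
 *			return NULL;
 *		dst = malloc(len);
 *		if (dst == NULL)
 *			return NULL;
 *		if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, len, src,
 *				  err) < 0) {
 *			free(dst);
 *			return NULL;
 *		}
 *		return dst;
 *	}
 */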

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
static int
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct rte_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check if the last valid item has spec set
	 * and need complete pattern.
	 */
	missed_item.type = rte_flow_expand_rss_item_complete(last_item);
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy it.
			 * When stack_pos is 0 there is one element in it,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
					missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper into the expansion graph when possible. */
		if (node->next) {
			next_node = node->next;
			if (stack_pos++ == elt_n) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/* No expanded flows, but we have a missed item; create one rule for it. */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	return lsize;
}
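
/*
 * For instance, with an expansion graph where IPV4 can be followed by UDP
 * and TCP, a user pattern of ETH / IPV4 / END and RSS types covering UDP
 * and TCP would roughly expand into:
 *
 *	ETH / IPV4 / END         (priority 0, the original pattern)
 *	ETH / IPV4 / UDP / END   (priority 1)
 *	ETH / IPV4 / TCP / END   (priority 1)
 *
 * letting a driver install one rule per hashable protocol layer.
 */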

int
rte_flow_dev_dump(uint16_t port_id, FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump))
		return flow_err(port_id, ops->dev_dump(dev, file, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows))
		return flow_err(port_id, ops->get_aged_flows(dev, contexts,
				nb_contexts, error), error);
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
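
/*
 * Illustrative usage sketch (not part of the library): after creating rules
 * with an AGE action, an application can poll for the contexts of rules
 * whose timeout expired. Each returned context is the rte_flow_action_age
 * context supplied at rule creation; example_handle_aged_context() is a
 * hypothetical application callback.
 *
 *	static void
 *	example_poll_aged(uint16_t port_id)
 *	{
 *		void *contexts[32];
 *		struct rte_flow_error err;
 *		int n, i;
 *
 *		n = rte_flow_get_aged_flows(port_id, contexts,
 *					    RTE_DIM(contexts), &err);
 *		for (i = 0; i < n; i++)
 *			example_handle_aged_context(contexts[i]);
 *	}
 */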