/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"

/* Mbuf dynamic field offset for metadata. */
int rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;
/**
 * Flow element description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
};
/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
};
int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = (1ULL << flag);
	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = 0ULL;
	return -rte_errno;
}

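/*
 * Illustrative sketch, not part of the original source: once
 * rte_flow_dynf_metadata_register() has succeeded, applications and PMDs
 * reach the per-mbuf metadata field through the helpers declared in
 * rte_flow.h, roughly as below. Exact helper and flag names should be
 * checked against the rte_flow.h of the DPDK release in use.
 *
 *	if (rte_flow_dynf_metadata_avail()) {
 *		*RTE_FLOW_DYNF_METADATA(mbuf) = 0xcafe;
 *		mbuf->ol_flags |= PKT_TX_DYNF_METADATA;
 *	}
 */
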
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

static enum rte_flow_item_type
rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
					(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
			      (item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
					(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
			      (item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					   (item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				 (item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
					   (item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				 (item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}

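/*
 * Illustrative note, not part of the original source: the helper above
 * turns a fully masked protocol hint carried by the last item's spec into
 * the item type that should follow it. For instance, an ETH item whose
 * spec sets the IPv4 ether type under a full type mask completes to
 * RTE_FLOW_ITEM_TYPE_IPV4, which the RSS expansion below can then append:
 *
 *	struct rte_flow_item_eth spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth mask = {
 *		.type = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 *
 * rte_flow_expand_rss_item_complete(&item) then returns
 * RTE_FLOW_ITEM_TYPE_IPV4.
 */
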
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

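/*
 * Illustrative usage sketch, not part of the original source: a typical
 * caller validates a rule before creating it. The port ID, queue index
 * and the minimal pattern below are arbitrary examples.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */
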
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

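/*
 * Illustrative sketch, not part of the original source: PMD callbacks are
 * expected to report failures through this helper, which fills the error
 * structure, sets rte_errno and returns a negative value in one go, e.g.:
 *
 *	return rte_flow_error_set(error, ENOTSUP,
 *				  RTE_FLOW_ERROR_TYPE_ITEM, item,
 *				  "item not supported");
 */
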
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}

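/*
 * Illustrative sketch, not part of the original source: callers typically
 * invoke rte_flow_conv() twice, once with a zero size to learn how many
 * bytes the converted objects need and once with a buffer at least that
 * large. "attr", "pattern", "actions" and "err" stand for caller-provided
 * objects here.
 *
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *copy = malloc(len);
 *
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len,
 *				      &rule, &err);
 *	}
 */
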
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
int
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct rte_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check whether the last valid item has its spec set and needs the
	 * pattern to be completed.
	 */
	missed_item.type = rte_flow_expand_rss_item_complete(last_item);
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them. When stack_pos is 0,
			 * there is one element in it plus the additional
			 * END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
						missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			if (stack_pos++ == elt_n) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/* No expanded flows, but we have a missed item: create one rule for it. */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));