/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"

/* Mbuf dynamic field offset for metadata. */
int rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = (1ULL << flag);
	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = 0ULL;
	return -rte_errno;
}

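/*
 * Illustrative usage sketch: applications register the metadata dynamic
 * field once, before creating flow rules that match or set metadata, and
 * then access it through the helpers declared in rte_flow.h (helper and
 * flag names may vary between releases):
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		return -rte_errno;
 *	...
 *	if (mbuf->ol_flags & PKT_RX_DYNF_METADATA)
 *		printf("metadata: %#x\n", *RTE_FLOW_DYNF_METADATA(mbuf));
 */
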
/* Convert a driver return value into an rte_flow error when needed. */
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

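/*
 * Given the last pattern item provided by the user (and its spec), determine
 * the item type that should logically follow it, e.g. an ETH item whose type
 * field is IPv4 yields RTE_FLOW_ITEM_TYPE_IPV4. RTE_FLOW_ITEM_TYPE_VOID is
 * returned when the next type cannot be inferred. Used by
 * rte_flow_expand_rss() to complete user patterns before expansion.
 */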
static enum rte_flow_item_type
rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint8_t ip_next_proto = 0;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		ether_type = ((const struct rte_flow_item_eth *)
				(item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		ether_type = ((const struct rte_flow_item_vlan *)
				(item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				(item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				(item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

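/*
 * Usage sketch (illustrative only): callers normally validate a rule before
 * creating it. The identifiers below mirror the public rte_flow API; error
 * handling is abbreviated.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */
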
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

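/*
 * Note (illustrative): entering isolated mode, e.g.
 * "rte_flow_isolate(port_id, 1, &error)", tells the PMD that only traffic
 * matching explicit flow rules should reach the application; depending on
 * the driver, this may have to be requested before the port is configured
 * and started.
 */
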
/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

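/*
 * Convention note: PMD callbacks report failures through this helper, e.g.
 * "return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
 * action, "action not supported");", which fills *error (when non-NULL),
 * sets rte_errno and returns a negative errno value in one step.
 */
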
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}

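/*
 * Note: RAW is currently the only pattern item carrying an additional
 * variable-length buffer (its pattern pointer), hence the dedicated
 * deep-copy case above; every other item is copied verbatim using the
 * fixed size recorded in rte_flow_desc_item[].
 */
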
/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (src->type == RTE_FLOW_ITEM_TYPE_END)
			break;
	}
	num = i + 1;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (src->type == RTE_FLOW_ACTION_TYPE_END)
			break;
	}
	num = i + 1;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

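/*
 * Note: rte_flow_copy() is kept as a thin compatibility wrapper around
 * rte_flow_conv(RTE_FLOW_CONV_OP_RULE); new code is expected to call
 * rte_flow_conv() directly, which also reports conversion errors instead of
 * silently returning 0.
 */
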
/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
int
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct rte_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check if the last valid item has a spec set
	 * and needs a completed pattern.
	 */
	missed_item.type = rte_flow_expand_rss_item_complete(last_item);
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy it.
			 * When stack_pos is 0, there is one element in it,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
					missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			if (stack_pos++ == elt_n) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/* No expanded flows but we have a missed item; create one rule for it. */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));