4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * RTE generic flow API
41 * This interface provides the ability to program packet matching and
42 * associated actions in hardware through flow rules.
46 #include <rte_ether.h>
52 #include <rte_byteorder.h>
59 * Flow rule attributes.
61 * Priorities are set on two levels: per group and per rule within groups.
63 * Lower values denote higher priority, the highest priority for both levels
64 * is 0, so that a rule with priority 0 in group 8 is always matched after a
65 * rule with priority 8 in group 0.
67 * Although optional, applications are encouraged to group similar rules as
68 * much as possible to fully take advantage of hardware capabilities
69 * (e.g. optimized matching) and work around limitations (e.g. a single
70 * pattern type possibly allowed in a given group).
72 * Group and priority levels are arbitrary and up to the application, they
73 * do not need to be contiguous nor start from 0, however the maximum number
74 * varies between devices and may be affected by existing flow rules.
76 * If a packet is matched by several rules of a given group for a given
77 * priority level, the outcome is undefined. It can take any path, may be
78 * duplicated or even cause unrecoverable errors.
80 * Note that support for more than a single group and priority level is not guaranteed.
83 * Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
85 * Several pattern items and actions are valid and can be used in both
86 * directions. Those valid for only one direction are described as such.
88 * At least one direction must be specified.
90 * Specifying both directions at once for a given rule is not recommended
91 * but may be valid in a few cases (e.g. shared counter).
93 struct rte_flow_attr {
94 uint32_t group; /**< Priority group; lower values match first. */
95 uint32_t priority; /**< Priority level within group; 0 is highest. */
96 uint32_t ingress:1; /**< Rule applies to ingress traffic. */
97 uint32_t egress:1; /**< Rule applies to egress traffic. */
98 uint32_t reserved:30; /**< Reserved, must be zero. */
102 * Matching pattern item types.
104 * Pattern items fall in two categories:
106 * - Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
107 * IPV6, ICMP, UDP, TCP, SCTP, VXLAN and so on), usually associated with a
108 * specification structure. These must be stacked in the same order as the
109 * protocol layers to match, starting from the lowest.
111 * - Matching meta-data or affecting pattern processing (END, VOID, INVERT,
112 * PF, VF, PORT and so on), often without a specification structure. Since
113 * they do not match packet contents, these can be specified anywhere
114 * within item lists without affecting others.
116 * See the description of individual types for more information. Those
117 * marked with [META] fall into the second category.
119 enum rte_flow_item_type {
123 * End marker for item lists. Prevents further processing of items,
124 * thereby ending the pattern.
126 * No associated specification structure.
128 RTE_FLOW_ITEM_TYPE_END,
133 * Used as a placeholder for convenience. It is ignored and simply discarded.
136 * No associated specification structure.
138 RTE_FLOW_ITEM_TYPE_VOID,
143 * Inverted matching, i.e. process packets that do not match the pattern.
146 * No associated specification structure.
148 RTE_FLOW_ITEM_TYPE_INVERT,
151 * Matches any protocol in place of the current layer, a single ANY
152 * may also stand for several protocol layers.
154 * See struct rte_flow_item_any.
156 RTE_FLOW_ITEM_TYPE_ANY,
161 * Matches packets addressed to the physical function of the device.
163 * If the underlying device function differs from the one that would
164 * normally receive the matched traffic, specifying this item
165 * prevents it from reaching that device unless the flow rule
166 * contains a PF action. Packets are not duplicated between device
167 * instances by default.
169 * No associated specification structure.
171 RTE_FLOW_ITEM_TYPE_PF,
176 * Matches packets addressed to a virtual function ID of the device.
178 * If the underlying device function differs from the one that would
179 * normally receive the matched traffic, specifying this item
180 * prevents it from reaching that device unless the flow rule
181 * contains a VF action. Packets are not duplicated between device
182 * instances by default.
184 * See struct rte_flow_item_vf.
186 RTE_FLOW_ITEM_TYPE_VF,
191 * Matches packets coming from the specified physical port of the underlying device.
194 * The first PORT item overrides the physical port normally
195 * associated with the specified DPDK input port (port_id). This
196 * item can be provided several times to match additional physical ports.
199 * See struct rte_flow_item_port.
201 RTE_FLOW_ITEM_TYPE_PORT,
204 * Matches a byte string of a given length at a given offset.
206 * See struct rte_flow_item_raw.
208 RTE_FLOW_ITEM_TYPE_RAW,
211 * Matches an Ethernet header.
213 * See struct rte_flow_item_eth.
215 RTE_FLOW_ITEM_TYPE_ETH,
218 * Matches an 802.1Q/ad VLAN tag.
220 * See struct rte_flow_item_vlan.
222 RTE_FLOW_ITEM_TYPE_VLAN,
225 * Matches an IPv4 header.
227 * See struct rte_flow_item_ipv4.
229 RTE_FLOW_ITEM_TYPE_IPV4,
232 * Matches an IPv6 header.
234 * See struct rte_flow_item_ipv6.
236 RTE_FLOW_ITEM_TYPE_IPV6,
239 * Matches an ICMP header.
241 * See struct rte_flow_item_icmp.
243 RTE_FLOW_ITEM_TYPE_ICMP,
246 * Matches a UDP header.
248 * See struct rte_flow_item_udp.
250 RTE_FLOW_ITEM_TYPE_UDP,
253 * Matches a TCP header.
255 * See struct rte_flow_item_tcp.
257 RTE_FLOW_ITEM_TYPE_TCP,
260 * Matches a SCTP header.
262 * See struct rte_flow_item_sctp.
264 RTE_FLOW_ITEM_TYPE_SCTP,
267 * Matches a VXLAN header.
269 * See struct rte_flow_item_vxlan.
271 RTE_FLOW_ITEM_TYPE_VXLAN,
274 * Matches a E_TAG header.
276 * See struct rte_flow_item_e_tag.
278 RTE_FLOW_ITEM_TYPE_E_TAG,
281 * Matches a NVGRE header.
283 * See struct rte_flow_item_nvgre.
285 RTE_FLOW_ITEM_TYPE_NVGRE,
288 * Matches a MPLS header.
290 * See struct rte_flow_item_mpls.
292 RTE_FLOW_ITEM_TYPE_MPLS,
295 * Matches a GRE header.
297 * See struct rte_flow_item_gre.
299 RTE_FLOW_ITEM_TYPE_GRE,
304 * Fuzzy pattern match, expected to be faster than exact match.
306 * This is for devices that support the fuzzy matching option.
307 * Usually fuzzy matching is fast but the cost is accuracy.
309 * See struct rte_flow_item_fuzzy.
311 RTE_FLOW_ITEM_TYPE_FUZZY,
315 * RTE_FLOW_ITEM_TYPE_ANY
317 * Matches any protocol in place of the current layer, a single ANY may also
318 * stand for several protocol layers.
320 * This is usually specified as the first pattern item when looking for a
321 * protocol anywhere in a packet.
323 * A zeroed mask stands for any number of layers.
325 struct rte_flow_item_any {
326 uint32_t num; /**< Number of layers covered. */
329 /** Default mask for RTE_FLOW_ITEM_TYPE_ANY (zeroed num matches any number of layers). */
331 static const struct rte_flow_item_any rte_flow_item_any_mask = {
337 * RTE_FLOW_ITEM_TYPE_VF
339 * Matches packets addressed to a virtual function ID of the device.
341 * If the underlying device function differs from the one that would
342 * normally receive the matched traffic, specifying this item prevents it
343 * from reaching that device unless the flow rule contains a VF
344 * action. Packets are not duplicated between device instances by default.
346 * - Likely to return an error or never match any traffic if this causes a
347 * VF device to match traffic addressed to a different VF.
348 * - Can be specified multiple times to match traffic addressed to several VF IDs.
350 * - Can be combined with a PF item to match both PF and VF traffic.
352 * A zeroed mask can be used to match any VF ID.
354 struct rte_flow_item_vf {
355 uint32_t id; /**< Destination VF ID. */
358 /** Default mask for RTE_FLOW_ITEM_TYPE_VF (zeroed to match any VF ID). */
360 static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
366 * RTE_FLOW_ITEM_TYPE_PORT
368 * Matches packets coming from the specified physical port of the underlying device.
371 * The first PORT item overrides the physical port normally associated with
372 * the specified DPDK input port (port_id). This item can be provided
373 * several times to match additional physical ports.
375 * Note that physical ports are not necessarily tied to DPDK input ports
376 * (port_id) when those are not under DPDK control. Possible values are
377 * specific to each device, they are not necessarily indexed from zero and
378 * may not be contiguous.
380 * As a device property, the list of allowed values as well as the value
381 * associated with a port_id should be retrieved by other means.
383 * A zeroed mask can be used to match any port index.
385 struct rte_flow_item_port {
386 uint32_t index; /**< Physical port index. */
389 /** Default mask for RTE_FLOW_ITEM_TYPE_PORT (zeroed to match any port index). */
391 static const struct rte_flow_item_port rte_flow_item_port_mask = {
397 * RTE_FLOW_ITEM_TYPE_RAW
399 * Matches a byte string of a given length at a given offset.
401 * Offset is either absolute (using the start of the packet) or relative to
402 * the end of the previous matched item in the stack, in which case negative
403 * values are allowed.
405 * If search is enabled, offset is used as the starting point. The search
406 * area can be delimited by setting limit to a nonzero value, which is the
407 * maximum number of bytes after offset where the pattern may start.
409 * Matching a zero-length pattern is allowed, doing so resets the relative
410 * offset for subsequent items.
412 * This type does not support ranges (struct rte_flow_item.last).
414 struct rte_flow_item_raw {
415 uint32_t relative:1; /**< Look for pattern after the previous item. */
416 uint32_t search:1; /**< Search pattern from offset (see also limit). */
417 uint32_t reserved:30; /**< Reserved, must be set to zero. */
418 int32_t offset; /**< Absolute or relative offset for pattern; may be negative when relative. */
419 uint16_t limit; /**< Search area limit for start of pattern. */
420 uint16_t length; /**< Pattern length. */
421 uint8_t pattern[]; /**< Byte string to look for (flexible array member). */
424 /** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */
426 static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
429 .reserved = 0x3fffffff,
430 .offset = 0xffffffff,
437 * RTE_FLOW_ITEM_TYPE_ETH
439 * Matches an Ethernet header.
441 struct rte_flow_item_eth {
442 struct ether_addr dst; /**< Destination MAC. */
443 struct ether_addr src; /**< Source MAC. */
444 rte_be16_t type; /**< EtherType. */
447 /** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
449 static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
450 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
451 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
452 .type = RTE_BE16(0x0000), /* EtherType not matched by default. */
457 * RTE_FLOW_ITEM_TYPE_VLAN
459 * Matches an 802.1Q/ad VLAN tag.
461 * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or
462 * RTE_FLOW_ITEM_TYPE_VLAN.
464 struct rte_flow_item_vlan {
465 rte_be16_t tpid; /**< Tag protocol identifier. */
466 rte_be16_t tci; /**< Tag control information. */
469 /** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
471 static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
472 .tpid = RTE_BE16(0x0000), /* TPID not matched by default. */
473 .tci = RTE_BE16(0xffff),
478 * RTE_FLOW_ITEM_TYPE_IPV4
480 * Matches an IPv4 header.
482 * Note: IPv4 options are handled by dedicated pattern items.
484 struct rte_flow_item_ipv4 {
485 struct ipv4_hdr hdr; /**< IPv4 header definition. */
488 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV4 (source/destination addresses only). */
490 static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
492 .src_addr = RTE_BE32(0xffffffff),
493 .dst_addr = RTE_BE32(0xffffffff),
499 * RTE_FLOW_ITEM_TYPE_IPV6.
501 * Matches an IPv6 header.
503 * Note: IPv6 options are handled by dedicated pattern items.
505 struct rte_flow_item_ipv6 {
506 struct ipv6_hdr hdr; /**< IPv6 header definition. */
509 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV6 (source/destination addresses only). */
511 static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
514 "\xff\xff\xff\xff\xff\xff\xff\xff"
515 "\xff\xff\xff\xff\xff\xff\xff\xff",
517 "\xff\xff\xff\xff\xff\xff\xff\xff"
518 "\xff\xff\xff\xff\xff\xff\xff\xff",
524 * RTE_FLOW_ITEM_TYPE_ICMP.
526 * Matches an ICMP header.
528 struct rte_flow_item_icmp {
529 struct icmp_hdr hdr; /**< ICMP header definition. */
532 /** Default mask for RTE_FLOW_ITEM_TYPE_ICMP. NOTE(review): presumably matches type/code by default — verify. */
534 static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
543 * RTE_FLOW_ITEM_TYPE_UDP.
545 * Matches a UDP header.
547 struct rte_flow_item_udp {
548 struct udp_hdr hdr; /**< UDP header definition. */
551 /** Default mask for RTE_FLOW_ITEM_TYPE_UDP (source/destination ports only). */
553 static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
555 .src_port = RTE_BE16(0xffff),
556 .dst_port = RTE_BE16(0xffff),
562 * RTE_FLOW_ITEM_TYPE_TCP.
564 * Matches a TCP header.
566 struct rte_flow_item_tcp {
567 struct tcp_hdr hdr; /**< TCP header definition. */
570 /** Default mask for RTE_FLOW_ITEM_TYPE_TCP (source/destination ports only). */
572 static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
574 .src_port = RTE_BE16(0xffff),
575 .dst_port = RTE_BE16(0xffff),
581 * RTE_FLOW_ITEM_TYPE_SCTP.
583 * Matches a SCTP header.
585 struct rte_flow_item_sctp {
586 struct sctp_hdr hdr; /**< SCTP header definition. */
589 /** Default mask for RTE_FLOW_ITEM_TYPE_SCTP (source/destination ports only). */
591 static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
593 .src_port = RTE_BE16(0xffff),
594 .dst_port = RTE_BE16(0xffff),
600 * RTE_FLOW_ITEM_TYPE_VXLAN.
602 * Matches a VXLAN header (RFC 7348).
604 struct rte_flow_item_vxlan {
605 uint8_t flags; /**< Normally 0x08 (I flag). */
606 uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
607 uint8_t vni[3]; /**< VXLAN identifier (24 bits). */
608 uint8_t rsvd1; /**< Reserved, normally 0x00. */
611 /** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN (VNI only). */
613 static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
614 .vni = "\xff\xff\xff",
619 * RTE_FLOW_ITEM_TYPE_E_TAG.
621 * Matches a E-tag header.
623 struct rte_flow_item_e_tag {
624 rte_be16_t tpid; /**< Tag protocol identifier (0x893F). */
626 * E-Tag control information (E-TCI).
627 * E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
629 rte_be16_t epcp_edei_in_ecid_b;
630 /** Reserved (2b), GRP (2b), E-CID base (12b). */
631 rte_be16_t rsvd_grp_ecid_b;
632 uint8_t in_ecid_e; /**< Ingress E-CID ext. */
633 uint8_t ecid_e; /**< E-CID ext. */
636 /** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
638 static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
639 .rsvd_grp_ecid_b = RTE_BE16(0x3fff), /* Low 14 bits: GRP + E-CID base. */
644 * RTE_FLOW_ITEM_TYPE_NVGRE.
646 * Matches a NVGRE header.
648 struct rte_flow_item_nvgre {
650 * Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
651 * reserved 0 (9b), version (3b).
653 * c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
655 rte_be16_t c_k_s_rsvd0_ver;
656 rte_be16_t protocol; /**< Protocol type (0x6558). */
657 uint8_t tni[3]; /**< Virtual subnet ID (24 bits). */
658 uint8_t flow_id; /**< Flow ID. */
661 /** Default mask for RTE_FLOW_ITEM_TYPE_NVGRE (TNI only). */
663 static const struct rte_flow_item_nvgre rte_flow_item_nvgre_mask = {
664 .tni = "\xff\xff\xff",
669 * RTE_FLOW_ITEM_TYPE_MPLS.
671 * Matches a MPLS header.
673 struct rte_flow_item_mpls {
675 * Label (20b), TC (3b), Bottom of Stack (1b).
677 uint8_t label_tc_s[3];
678 uint8_t ttl; /**< Time-to-Live. */
681 /** Default mask for RTE_FLOW_ITEM_TYPE_MPLS. */
683 static const struct rte_flow_item_mpls rte_flow_item_mpls_mask = {
684 .label_tc_s = "\xff\xff\xf0", /* 20-bit label only. */
689 * RTE_FLOW_ITEM_TYPE_GRE.
691 * Matches a GRE header.
693 struct rte_flow_item_gre {
695 * Checksum (1b), reserved 0 (12b), version (3b).
698 rte_be16_t c_rsvd0_ver;
699 rte_be16_t protocol; /**< Protocol type. */
702 /** Default mask for RTE_FLOW_ITEM_TYPE_GRE (protocol type only). */
704 static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
705 .protocol = RTE_BE16(0xffff),
710 * RTE_FLOW_ITEM_TYPE_FUZZY
712 * Fuzzy pattern match, expected to be faster than exact match.
714 * This is for devices that support the fuzzy match option.
715 * Usually a fuzzy match is fast but the cost is accuracy, i.e. a
716 * signature match only matches the pattern's hash value, and it is
717 * possible for two different patterns to have the same hash value.
719 * The matching accuracy level can be configured through the threshold.
720 * The driver can divide the range of threshold and map it to the different
721 * accuracy levels that the device supports.
723 struct rte_flow_item_fuzzy {
724 uint32_t thresh; /**< Accuracy threshold. */
727 /** Default mask for RTE_FLOW_ITEM_TYPE_FUZZY. */
729 static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
730 .thresh = 0xffffffff,
735 * Matching pattern item definition.
737 * A pattern is formed by stacking items starting from the lowest protocol
738 * layer to match. This stacking restriction does not apply to meta items
739 * which can be placed anywhere in the stack without affecting the meaning
740 * of the resulting pattern.
742 * Patterns are terminated by END items.
744 * The spec field should be a valid pointer to a structure of the related
745 * item type. It may remain unspecified (NULL) in many cases to request
746 * broad (nonspecific) matching. In such cases, last and mask must also be set to NULL.
749 * Optionally, last can point to a structure of the same type to define an
750 * inclusive range. This is mostly supported by integer and address fields,
751 * may cause errors otherwise. Fields that do not support ranges must be set
752 * to 0 or to the same value as the corresponding fields in spec.
754 * Only the fields defined to nonzero values in the default masks (see
755 * rte_flow_item_{name}_mask constants) are considered relevant by
756 * default. This can be overridden by providing a mask structure of the
757 * same type with applicable bits set to one. It can also be used to
758 * partially filter out specific fields (e.g. as an alternate mean to match
759 * ranges of IP addresses).
761 * Mask is a simple bit-mask applied before interpreting the contents of
762 * spec and last, which may yield unexpected results if not used
763 * carefully. For example, if for an IPv4 address field, spec provides
764 * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
765 * effective range becomes 10.1.0.0 to 10.3.255.255.
767 struct rte_flow_item {
768 enum rte_flow_item_type type; /**< Item type. */
769 const void *spec; /**< Pointer to item specification structure (NULL for broad matching). */
770 const void *last; /**< Defines an inclusive range (spec to last). */
771 const void *mask; /**< Bit-mask applied to spec and last. */
777 * Each possible action is represented by a type. Some have associated
778 * configuration structures. Several actions combined in a list can be
779 * affected to a flow rule. That list is not ordered.
781 * They fall in three categories:
783 * - Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
784 * processing matched packets by subsequent flow rules, unless overridden with PASSTHRU.
787 * - Non terminating actions (PASSTHRU, DUP) that leave matched packets up
788 * for additional processing by subsequent flow rules.
790 * - Other non terminating meta actions that do not affect the fate of
791 * packets (END, VOID, MARK, FLAG, COUNT).
793 * When several actions are combined in a flow rule, they should all have
794 * different types (e.g. dropping a packet twice is not possible).
796 * Only the last action of a given type is taken into account. PMDs still
797 * perform error checking on the entire list.
799 * Note that PASSTHRU is the only action able to override a terminating rule.
802 enum rte_flow_action_type {
806 * End marker for action lists. Prevents further processing of
807 * actions, thereby ending the list.
809 * No associated configuration structure.
811 RTE_FLOW_ACTION_TYPE_END,
816 * Used as a placeholder for convenience. It is ignored and simply discarded.
819 * No associated configuration structure.
821 RTE_FLOW_ACTION_TYPE_VOID,
824 * Leaves packets up for additional processing by subsequent flow
825 * rules. This is the default when a rule does not contain a
826 * terminating action, but can be specified to force a rule to
827 * become non-terminating.
829 * No associated configuration structure.
831 RTE_FLOW_ACTION_TYPE_PASSTHRU,
836 * Attaches an integer value to packets and sets PKT_RX_FDIR and
837 * PKT_RX_FDIR_ID mbuf flags.
839 * See struct rte_flow_action_mark.
841 RTE_FLOW_ACTION_TYPE_MARK,
846 * Flags packets. Similar to MARK without a specific value; only
847 * sets the PKT_RX_FDIR mbuf flag.
849 * No associated configuration structure.
851 RTE_FLOW_ACTION_TYPE_FLAG,
854 * Assigns packets to a given queue index.
856 * See struct rte_flow_action_queue.
858 RTE_FLOW_ACTION_TYPE_QUEUE,
863 * Drop packets. PASSTHRU overrides this action if both are specified.
865 * No associated configuration structure.
867 RTE_FLOW_ACTION_TYPE_DROP,
872 * Enables counters for this rule.
874 * These counters can be retrieved and reset through rte_flow_query(),
875 * see struct rte_flow_query_count.
877 * No associated configuration structure.
879 RTE_FLOW_ACTION_TYPE_COUNT,
882 * Duplicates packets to a given queue index.
884 * This is normally combined with QUEUE, however when used alone, it
885 * is actually similar to QUEUE + PASSTHRU.
887 * See struct rte_flow_action_dup.
889 RTE_FLOW_ACTION_TYPE_DUP,
892 * Similar to QUEUE, except RSS is additionally performed on packets
893 * to spread them among several queues according to the provided parameters.
896 * See struct rte_flow_action_rss.
898 RTE_FLOW_ACTION_TYPE_RSS,
901 * Redirects packets to the physical function (PF) of the current device.
904 * No associated configuration structure.
906 RTE_FLOW_ACTION_TYPE_PF,
909 * Redirects packets to the virtual function (VF) of the current
910 * device with the specified ID.
912 * See struct rte_flow_action_vf.
914 RTE_FLOW_ACTION_TYPE_VF,
918 * RTE_FLOW_ACTION_TYPE_MARK
920 * Attaches an integer value to packets and sets PKT_RX_FDIR and
921 * PKT_RX_FDIR_ID mbuf flags.
923 * This value is arbitrary and application-defined. Maximum allowed value
924 * depends on the underlying implementation. It is returned in the
925 * hash.fdir.hi mbuf field.
927 struct rte_flow_action_mark {
928 uint32_t id; /**< Integer value returned with packets in the hash.fdir.hi mbuf field. */
932 * RTE_FLOW_ACTION_TYPE_QUEUE
934 * Assign packets to a given queue index.
936 * Terminating by default.
/* Terminating by default. */
938 struct rte_flow_action_queue {
939 uint16_t index; /**< Queue index to use. */
943 * RTE_FLOW_ACTION_TYPE_COUNT (query)
945 * Query structure to retrieve and reset flow rule counters.
947 struct rte_flow_query_count {
948 uint32_t reset:1; /**< Reset counters after query [in]. */
949 uint32_t hits_set:1; /**< hits field is set [out]. */
950 uint32_t bytes_set:1; /**< bytes field is set [out]. */
951 uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
952 uint64_t hits; /**< Number of hits for this rule [out], valid when hits_set is one. */
953 uint64_t bytes; /**< Number of bytes through this rule [out], valid when bytes_set is one. */
957 * RTE_FLOW_ACTION_TYPE_DUP
959 * Duplicates packets to a given queue index.
961 * This is normally combined with QUEUE, however when used alone, it is
962 * actually similar to QUEUE + PASSTHRU.
964 * Non-terminating by default.
/* Non-terminating by default. */
966 struct rte_flow_action_dup {
967 uint16_t index; /**< Queue index to duplicate packets to. */
971 * RTE_FLOW_ACTION_TYPE_RSS
973 * Similar to QUEUE, except RSS is additionally performed on packets to
974 * spread them among several queues according to the provided parameters.
976 * Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
977 * hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
978 * both can be requested simultaneously.
980 * Terminating by default.
982 struct rte_flow_action_rss {
983 const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
984 uint16_t num; /**< Number of entries in queue[]. */
985 uint16_t queue[]; /**< Queue indices to use (flexible array member). */
989 * RTE_FLOW_ACTION_TYPE_VF
991 * Redirects packets to a virtual function (VF) of the current device.
993 * Packets matched by a VF pattern item can be redirected to their original
994 * VF ID instead of the specified one. This parameter may not be available
995 * and is not guaranteed to work properly if the VF part is matched by a
996 * prior flow rule or if packets are not addressed to a VF in the first place.
999 * Terminating by default.
1001 struct rte_flow_action_vf {
1002 uint32_t original:1; /**< Use original VF ID if possible (see VF pattern item). */
1003 uint32_t reserved:31; /**< Reserved, must be zero. */
1004 uint32_t id; /**< VF ID to redirect packets to. */
1008 * Definition of a single action.
1010 * A list of actions is terminated by a END action.
1012 * For simple actions without a configuration structure, conf remains NULL.
1014 struct rte_flow_action {
1015 enum rte_flow_action_type type; /**< Action type. */
1016 const void *conf; /**< Pointer to action configuration structure, NULL when the action takes none. */
1020 * Opaque type returned after successfully creating a flow.
1022 * This handle can be used to manage and query the related flow (e.g. to
1023 * destroy it or retrieve counters).
1028 * Verbose error types.
1030 * Most of them provide the type of the object referenced by struct
1031 * rte_flow_error.cause.
1033 enum rte_flow_error_type {
1034 RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
1035 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
1036 RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
1037 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
1038 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
1039 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
1040 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
1041 RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
1042 RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Number of pattern items. */
1043 RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
1044 RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
1045 RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
1049 * Verbose error structure definition.
1051 * This object is normally allocated by applications and set by PMDs, the
1052 * message points to a constant string which does not need to be freed by
1053 * the application, however its pointer can be considered valid only as long
1054 * as its associated DPDK port remains configured. Closing the underlying
1055 * device or unloading the PMD invalidates it.
1057 * Both cause and message may be NULL regardless of the error type.
1059 struct rte_flow_error {
1060 enum rte_flow_error_type type; /**< Cause field and error types. */
1061 const void *cause; /**< Object responsible for the error. */
1062 const char *message; /**< Human-readable error message (constant string, do not free). */
1066 * Check whether a flow rule can be created on a given port.
1068 * The flow rule is validated for correctness and whether it could be accepted
1069 * by the device given sufficient resources. The rule is checked against the
1070 * current device mode and queue configuration. The flow rule may also
1071 * optionally be validated against existing flow rules and device resources.
1072 * This function has no effect on the target device.
1074 * The returned value is guaranteed to remain valid only as long as no
1075 * successful calls to rte_flow_create() or rte_flow_destroy() are made in
1076 * the meantime and no device parameter affecting flow rules in any way are
1077 * modified, due to possible collisions or resource limitations (although in
1078 * such cases EINVAL should not be returned).
1081 * Port identifier of Ethernet device.
1083 * Flow rule attributes.
1084 * @param[in] pattern
1085 * Pattern specification (list terminated by the END pattern item).
1086 * @param[in] actions
1087 * Associated actions (list terminated by the END action).
1089 * Perform verbose error reporting if not NULL. PMDs initialize this
1090 * structure in case of error only.
1093 * 0 if flow rule is valid and can be created. A negative errno value
1094 * otherwise (rte_errno is also set), the following errors are defined:
1096 * -ENOSYS: underlying device does not support this functionality.
1098 * -EINVAL: unknown or invalid rule specification.
1100 * -ENOTSUP: valid but unsupported rule specification (e.g. partial
1101 * bit-masks are unsupported).
1103 * -EEXIST: collision with an existing rule. Only returned if device
1104 * supports flow rule collision checking and there was a flow rule
1105 * collision. Not receiving this return code is no guarantee that creating
1106 * the rule will not fail due to a collision.
1108 * -ENOMEM: not enough memory to execute the function, or if the device
1109 * supports resource validation, resource limitation on the device.
1111 * -EBUSY: action cannot be performed due to busy device resources, may
1112 * succeed if the affected queues or even the entire port are in a stopped
1113 * state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
/* Validate a flow rule for the given port; has no effect on the device. */
1116 rte_flow_validate(uint8_t port_id,
1117 const struct rte_flow_attr *attr,
1118 const struct rte_flow_item pattern[],
1119 const struct rte_flow_action actions[],
1120 struct rte_flow_error *error);
1123 * Create a flow rule on a given port.
1126 * Port identifier of Ethernet device.
1128 * Flow rule attributes.
1129 * @param[in] pattern
1130 * Pattern specification (list terminated by the END pattern item).
1131 * @param[in] actions
1132 * Associated actions (list terminated by the END action).
1134 * Perform verbose error reporting if not NULL. PMDs initialize this
1135 * structure in case of error only.
1138 * A valid handle in case of success, NULL otherwise and rte_errno is set
1139 * to the positive version of one of the error codes defined for
1140 * rte_flow_validate().
/* Create a flow rule; returns a handle on success, NULL otherwise (rte_errno set). */
1143 rte_flow_create(uint8_t port_id,
1144 const struct rte_flow_attr *attr,
1145 const struct rte_flow_item pattern[],
1146 const struct rte_flow_action actions[],
1147 struct rte_flow_error *error);
1150 * Destroy a flow rule on a given port.
1152 * Failure to destroy a flow rule handle may occur when other flow rules
1153 * depend on it, and destroying it would result in an inconsistent state.
1155 * This function is only guaranteed to succeed if handles are destroyed in
1156 * reverse order of their creation.
1159 * Port identifier of Ethernet device.
1161 * Flow rule handle to destroy.
1163 * Perform verbose error reporting if not NULL. PMDs initialize this
1164 * structure in case of error only.
1167 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Destroy a single flow rule handle; 0 on success, negative errno otherwise. */
1170 rte_flow_destroy(uint8_t port_id,
1171 struct rte_flow *flow,
1172 struct rte_flow_error *error);
1175 * Destroy all flow rules associated with a port.
1177 * In the unlikely event of failure, handles are still considered destroyed
1178 * and no longer valid but the port must be assumed to be in an inconsistent state.
1182 * Port identifier of Ethernet device.
1184 * Perform verbose error reporting if not NULL. PMDs initialize this
1185 * structure in case of error only.
1188 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Destroy all flow rules associated with a port. */
1191 rte_flow_flush(uint8_t port_id,
1192 struct rte_flow_error *error);
1195 * Query an existing flow rule.
1197 * This function allows retrieving flow-specific data such as counters.
1198 * Data is gathered by special actions which must be present in the flow
1201 * \see RTE_FLOW_ACTION_TYPE_COUNT
1204 * Port identifier of Ethernet device.
1206 * Flow rule handle to query.
1208 * Action type to query.
1209 * @param[in, out] data
1210 * Pointer to storage for the associated query data type.
1212 * Perform verbose error reporting if not NULL. PMDs initialize this
1213 * structure in case of error only.
1216 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Query flow-specific data (e.g. counters) gathered by an action present in the rule. */
1219 rte_flow_query(uint8_t port_id,
1220 struct rte_flow *flow,
1221 enum rte_flow_action_type action,
1223 struct rte_flow_error *error);
1226 * Restrict ingress traffic to the defined flow rules.
1228 * Isolated mode guarantees that all ingress traffic comes from defined flow
1229 * rules only (current and future).
1231 * Besides making ingress more deterministic, it allows PMDs to safely reuse
1232 * resources otherwise assigned to handle the remaining traffic, such as
1233 * global RSS configuration settings, VLAN filters, MAC address entries,
1234 * legacy filter API rules and so on in order to expand the set of possible
1237 * Calling this function as soon as possible after device initialization,
1238 * ideally before the first call to rte_eth_dev_configure(), is recommended
1239 * to avoid possible failures due to conflicting settings.
1241 * Once effective, leaving isolated mode may not be possible depending on
1242 * PMD implementation.
1244 * Additionally, the following functionality has no effect on the underlying
1245 * port and may return errors such as ENOTSUP ("not supported"):
1247 * - Toggling promiscuous mode.
1248 * - Toggling allmulticast mode.
1249 * - Configuring MAC addresses.
1250 * - Configuring multicast addresses.
1251 * - Configuring VLAN filters.
1252 * - Configuring Rx filters through the legacy API (e.g. FDIR).
1253 * - Configuring global RSS settings.
1256 * Port identifier of Ethernet device.
1258 * Nonzero to enter isolated mode, attempt to leave it otherwise.
1260 * Perform verbose error reporting if not NULL. PMDs initialize this
1261 * structure in case of error only.
1264 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Toggle isolated mode: restrict ingress traffic to the defined flow rules only. */
1267 rte_flow_isolate(uint8_t port_id, int set, struct rte_flow_error *error);
1273 #endif /* RTE_FLOW_H_ */