1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox.
11 * RTE generic flow API
13 * This interface provides the ability to program packet matching and
14 * associated actions in hardware through flow rules.
18 #include <rte_ether.h>
24 #include <rte_byteorder.h>
32 * Flow rule attributes.
34 * Priorities are set on two levels: per group and per rule within groups.
36 * Lower values denote higher priority, the highest priority for both levels
37 * is 0, so that a rule with priority 0 in group 8 is always matched after a
38 * rule with priority 8 in group 0.
40 * Although optional, applications are encouraged to group similar rules as
41 * much as possible to fully take advantage of hardware capabilities
42 * (e.g. optimized matching) and work around limitations (e.g. a single
43 * pattern type possibly allowed in a given group).
45 * Group and priority levels are arbitrary and up to the application, they
46 * do not need to be contiguous nor start from 0, however the maximum number
47 * varies between devices and may be affected by existing flow rules.
49 * If a packet is matched by several rules of a given group for a given
50 * priority level, the outcome is undefined. It can take any path, may be
51 * duplicated or even cause unrecoverable errors.
53 * Note that support for more than a single group and priority level is not guaranteed.
56 * Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
58 * Several pattern items and actions are valid and can be used in both
59 * directions. Those valid for only one direction are described as such.
61 * At least one direction must be specified.
63 * Specifying both directions at once for a given rule is not recommended
64 * but may be valid in a few cases (e.g. shared counter).
66 struct rte_flow_attr {
67 uint32_t group; /**< Priority group. */
68 uint32_t priority; /**< Priority level within group. */
69 uint32_t ingress:1; /**< Rule applies to ingress traffic. */
70 uint32_t egress:1; /**< Rule applies to egress traffic. */
71 uint32_t reserved:30; /**< Reserved, must be zero. */
75 * Matching pattern item types.
77 * Pattern items fall in two categories:
79 * - Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
80 * IPV6, ICMP, UDP, TCP, SCTP, VXLAN and so on), usually associated with a
81 * specification structure. These must be stacked in the same order as the
82 * protocol layers to match, starting from the lowest.
84 * - Matching meta-data or affecting pattern processing (END, VOID, INVERT,
85 * PF, VF, PORT and so on), often without a specification structure. Since
86 * they do not match packet contents, these can be specified anywhere
87 * within item lists without affecting others.
89 * See the description of individual types for more information. Those
90 * marked with [META] fall into the second category.
92 enum rte_flow_item_type {
96 * End marker for item lists. Prevents further processing of items,
97 * thereby ending the pattern.
99 * No associated specification structure.
101 RTE_FLOW_ITEM_TYPE_END,
106 * Used as a placeholder for convenience. It is ignored and simply discarded.
109 * No associated specification structure.
111 RTE_FLOW_ITEM_TYPE_VOID,
116 * Inverted matching, i.e. process packets that do not match the pattern.
119 * No associated specification structure.
121 RTE_FLOW_ITEM_TYPE_INVERT,
124 * Matches any protocol in place of the current layer, a single ANY
125 * may also stand for several protocol layers.
127 * See struct rte_flow_item_any.
129 RTE_FLOW_ITEM_TYPE_ANY,
134 * Matches packets addressed to the physical function of the device.
136 * If the underlying device function differs from the one that would
137 * normally receive the matched traffic, specifying this item
138 * prevents it from reaching that device unless the flow rule
139 * contains a PF action. Packets are not duplicated between device
140 * instances by default.
142 * No associated specification structure.
144 RTE_FLOW_ITEM_TYPE_PF,
149 * Matches packets addressed to a virtual function ID of the device.
151 * If the underlying device function differs from the one that would
152 * normally receive the matched traffic, specifying this item
153 * prevents it from reaching that device unless the flow rule
154 * contains a VF action. Packets are not duplicated between device
155 * instances by default.
157 * See struct rte_flow_item_vf.
159 RTE_FLOW_ITEM_TYPE_VF,
164 * Matches packets coming from the specified physical port of the underlying device.
167 * The first PORT item overrides the physical port normally
168 * associated with the specified DPDK input port (port_id). This
169 * item can be provided several times to match additional physical ports.
172 * See struct rte_flow_item_port.
174 RTE_FLOW_ITEM_TYPE_PORT,
177 * Matches a byte string of a given length at a given offset.
179 * See struct rte_flow_item_raw.
181 RTE_FLOW_ITEM_TYPE_RAW,
184 * Matches an Ethernet header.
186 * See struct rte_flow_item_eth.
188 RTE_FLOW_ITEM_TYPE_ETH,
191 * Matches an 802.1Q/ad VLAN tag.
193 * See struct rte_flow_item_vlan.
195 RTE_FLOW_ITEM_TYPE_VLAN,
198 * Matches an IPv4 header.
200 * See struct rte_flow_item_ipv4.
202 RTE_FLOW_ITEM_TYPE_IPV4,
205 * Matches an IPv6 header.
207 * See struct rte_flow_item_ipv6.
209 RTE_FLOW_ITEM_TYPE_IPV6,
212 * Matches an ICMP header.
214 * See struct rte_flow_item_icmp.
216 RTE_FLOW_ITEM_TYPE_ICMP,
219 * Matches a UDP header.
221 * See struct rte_flow_item_udp.
223 RTE_FLOW_ITEM_TYPE_UDP,
226 * Matches a TCP header.
228 * See struct rte_flow_item_tcp.
230 RTE_FLOW_ITEM_TYPE_TCP,
233 * Matches an SCTP header.
235 * See struct rte_flow_item_sctp.
237 RTE_FLOW_ITEM_TYPE_SCTP,
240 * Matches a VXLAN header.
242 * See struct rte_flow_item_vxlan.
244 RTE_FLOW_ITEM_TYPE_VXLAN,
247 * Matches an E-TAG header.
249 * See struct rte_flow_item_e_tag.
251 RTE_FLOW_ITEM_TYPE_E_TAG,
254 * Matches an NVGRE header.
256 * See struct rte_flow_item_nvgre.
258 RTE_FLOW_ITEM_TYPE_NVGRE,
261 * Matches an MPLS header.
263 * See struct rte_flow_item_mpls.
265 RTE_FLOW_ITEM_TYPE_MPLS,
268 * Matches a GRE header.
270 * See struct rte_flow_item_gre.
272 RTE_FLOW_ITEM_TYPE_GRE,
277 * Fuzzy pattern match, expected to be faster than the default.
279 * This is for devices that support the fuzzy matching option.
280 * Fuzzy matching is usually fast, but at the cost of accuracy.
282 * See struct rte_flow_item_fuzzy.
284 RTE_FLOW_ITEM_TYPE_FUZZY,
287 * Matches a GTP header.
289 * Configure flow for GTP packets.
291 * See struct rte_flow_item_gtp.
293 RTE_FLOW_ITEM_TYPE_GTP,
296 * Matches a GTP header.
298 * Configure flow for GTP-C packets.
300 * See struct rte_flow_item_gtp.
302 RTE_FLOW_ITEM_TYPE_GTPC,
305 * Matches a GTP header.
307 * Configure flow for GTP-U packets.
309 * See struct rte_flow_item_gtp.
311 RTE_FLOW_ITEM_TYPE_GTPU,
314 * Matches an ESP header.
316 * See struct rte_flow_item_esp.
318 RTE_FLOW_ITEM_TYPE_ESP,
321 * Matches a GENEVE header.
323 * See struct rte_flow_item_geneve.
325 RTE_FLOW_ITEM_TYPE_GENEVE,
329 * RTE_FLOW_ITEM_TYPE_ANY
331 * Matches any protocol in place of the current layer, a single ANY may also
332 * stand for several protocol layers.
334 * This is usually specified as the first pattern item when looking for a
335 * protocol anywhere in a packet.
337 * A zeroed mask stands for any number of layers.
339 struct rte_flow_item_any {
340 uint32_t num; /**< Number of layers covered. */
343 /** Default mask for RTE_FLOW_ITEM_TYPE_ANY. */
345 static const struct rte_flow_item_any rte_flow_item_any_mask = {
351 * RTE_FLOW_ITEM_TYPE_VF
353 * Matches packets addressed to a virtual function ID of the device.
355 * If the underlying device function differs from the one that would
356 * normally receive the matched traffic, specifying this item prevents it
357 * from reaching that device unless the flow rule contains a VF
358 * action. Packets are not duplicated between device instances by default.
360 * - Likely to return an error or never match any traffic if this causes a
361 * VF device to match traffic addressed to a different VF.
362 * - Can be specified multiple times to match traffic addressed to several VF IDs.
364 * - Can be combined with a PF item to match both PF and VF traffic.
366 * A zeroed mask can be used to match any VF ID.
368 struct rte_flow_item_vf {
369 uint32_t id; /**< Destination VF ID. */
372 /** Default mask for RTE_FLOW_ITEM_TYPE_VF. */
374 static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
380 * RTE_FLOW_ITEM_TYPE_PORT
382 * Matches packets coming from the specified physical port of the underlying device.
385 * The first PORT item overrides the physical port normally associated with
386 * the specified DPDK input port (port_id). This item can be provided
387 * several times to match additional physical ports.
389 * Note that physical ports are not necessarily tied to DPDK input ports
390 * (port_id) when those are not under DPDK control. Possible values are
391 * specific to each device, they are not necessarily indexed from zero and
392 * may not be contiguous.
394 * As a device property, the list of allowed values as well as the value
395 * associated with a port_id should be retrieved by other means.
397 * A zeroed mask can be used to match any port index.
399 struct rte_flow_item_port {
400 uint32_t index; /**< Physical port index. */
403 /** Default mask for RTE_FLOW_ITEM_TYPE_PORT. */
405 static const struct rte_flow_item_port rte_flow_item_port_mask = {
411 * RTE_FLOW_ITEM_TYPE_RAW
413 * Matches a byte string of a given length at a given offset.
415 * Offset is either absolute (using the start of the packet) or relative to
416 * the end of the previous matched item in the stack, in which case negative
417 * values are allowed.
419 * If search is enabled, offset is used as the starting point. The search
420 * area can be delimited by setting limit to a nonzero value, which is the
421 * maximum number of bytes after offset where the pattern may start.
423 * Matching a zero-length pattern is allowed, doing so resets the relative
424 * offset for subsequent items.
426 * This type does not support ranges (struct rte_flow_item.last).
428 struct rte_flow_item_raw {
429 uint32_t relative:1; /**< Look for pattern after the previous item. */
430 uint32_t search:1; /**< Search pattern from offset (see also limit). */
431 uint32_t reserved:30; /**< Reserved, must be set to zero. */
432 int32_t offset; /**< Absolute or relative offset for pattern. */
433 uint16_t limit; /**< Search area limit for start of pattern. */
434 uint16_t length; /**< Pattern length. */
435 uint8_t pattern[]; /**< Byte string to look for. */
438 /** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */
440 static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
443 .reserved = 0x3fffffff,
444 .offset = 0xffffffff,
451 * RTE_FLOW_ITEM_TYPE_ETH
453 * Matches an Ethernet header.
455 struct rte_flow_item_eth {
456 struct ether_addr dst; /**< Destination MAC. */
457 struct ether_addr src; /**< Source MAC. */
458 rte_be16_t type; /**< EtherType. */
461 /** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
463 static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
464 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
465 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
466 .type = RTE_BE16(0x0000),
471 * RTE_FLOW_ITEM_TYPE_VLAN
473 * Matches an 802.1Q/ad VLAN tag.
475 * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or
476 * RTE_FLOW_ITEM_TYPE_VLAN.
478 struct rte_flow_item_vlan {
479 rte_be16_t tpid; /**< Tag protocol identifier. */
480 rte_be16_t tci; /**< Tag control information. */
483 /** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
485 static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
486 .tpid = RTE_BE16(0x0000),
487 .tci = RTE_BE16(0xffff),
492 * RTE_FLOW_ITEM_TYPE_IPV4
494 * Matches an IPv4 header.
496 * Note: IPv4 options are handled by dedicated pattern items.
498 struct rte_flow_item_ipv4 {
499 struct ipv4_hdr hdr; /**< IPv4 header definition. */
502 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */
504 static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
506 .src_addr = RTE_BE32(0xffffffff),
507 .dst_addr = RTE_BE32(0xffffffff),
513 * RTE_FLOW_ITEM_TYPE_IPV6.
515 * Matches an IPv6 header.
517 * Note: IPv6 options are handled by dedicated pattern items.
519 struct rte_flow_item_ipv6 {
520 struct ipv6_hdr hdr; /**< IPv6 header definition. */
523 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */
525 static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
528 "\xff\xff\xff\xff\xff\xff\xff\xff"
529 "\xff\xff\xff\xff\xff\xff\xff\xff",
531 "\xff\xff\xff\xff\xff\xff\xff\xff"
532 "\xff\xff\xff\xff\xff\xff\xff\xff",
538 * RTE_FLOW_ITEM_TYPE_ICMP.
540 * Matches an ICMP header.
542 struct rte_flow_item_icmp {
543 struct icmp_hdr hdr; /**< ICMP header definition. */
546 /** Default mask for RTE_FLOW_ITEM_TYPE_ICMP. */
548 static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
557 * RTE_FLOW_ITEM_TYPE_UDP.
559 * Matches a UDP header.
561 struct rte_flow_item_udp {
562 struct udp_hdr hdr; /**< UDP header definition. */
565 /** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */
567 static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
569 .src_port = RTE_BE16(0xffff),
570 .dst_port = RTE_BE16(0xffff),
576 * RTE_FLOW_ITEM_TYPE_TCP.
578 * Matches a TCP header.
580 struct rte_flow_item_tcp {
581 struct tcp_hdr hdr; /**< TCP header definition. */
584 /** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */
586 static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
588 .src_port = RTE_BE16(0xffff),
589 .dst_port = RTE_BE16(0xffff),
595 * RTE_FLOW_ITEM_TYPE_SCTP.
597 * Matches a SCTP header.
599 struct rte_flow_item_sctp {
600 struct sctp_hdr hdr; /**< SCTP header definition. */
603 /** Default mask for RTE_FLOW_ITEM_TYPE_SCTP. */
605 static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
607 .src_port = RTE_BE16(0xffff),
608 .dst_port = RTE_BE16(0xffff),
614 * RTE_FLOW_ITEM_TYPE_VXLAN.
616 * Matches a VXLAN header (RFC 7348).
618 struct rte_flow_item_vxlan {
619 uint8_t flags; /**< Normally 0x08 (I flag). */
620 uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
621 uint8_t vni[3]; /**< VXLAN identifier. */
622 uint8_t rsvd1; /**< Reserved, normally 0x00. */
625 /** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN. */
627 static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
628 .vni = "\xff\xff\xff",
633 * RTE_FLOW_ITEM_TYPE_E_TAG.
635 * Matches a E-tag header.
637 struct rte_flow_item_e_tag {
638 rte_be16_t tpid; /**< Tag protocol identifier (0x893F). */
640 * E-Tag control information (E-TCI).
641 * E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
643 rte_be16_t epcp_edei_in_ecid_b;
644 /** Reserved (2b), GRP (2b), E-CID base (12b). */
645 rte_be16_t rsvd_grp_ecid_b;
646 uint8_t in_ecid_e; /**< Ingress E-CID ext. */
647 uint8_t ecid_e; /**< E-CID ext. */
650 /** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
652 static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
653 .rsvd_grp_ecid_b = RTE_BE16(0x3fff),
658 * RTE_FLOW_ITEM_TYPE_NVGRE.
660 * Matches a NVGRE header.
662 struct rte_flow_item_nvgre {
664 * Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
665 * reserved 0 (9b), version (3b).
667 * c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
669 rte_be16_t c_k_s_rsvd0_ver;
670 rte_be16_t protocol; /**< Protocol type (0x6558). */
671 uint8_t tni[3]; /**< Virtual subnet ID. */
672 uint8_t flow_id; /**< Flow ID. */
675 /** Default mask for RTE_FLOW_ITEM_TYPE_NVGRE. */
677 static const struct rte_flow_item_nvgre rte_flow_item_nvgre_mask = {
678 .tni = "\xff\xff\xff",
683 * RTE_FLOW_ITEM_TYPE_MPLS.
685 * Matches a MPLS header.
687 struct rte_flow_item_mpls {
689 * Label (20b), TC (3b), Bottom of Stack (1b).
691 uint8_t label_tc_s[3];
692 uint8_t ttl; /**< Time-to-Live. */
695 /** Default mask for RTE_FLOW_ITEM_TYPE_MPLS. */
697 static const struct rte_flow_item_mpls rte_flow_item_mpls_mask = {
698 .label_tc_s = "\xff\xff\xf0",
703 * RTE_FLOW_ITEM_TYPE_GRE.
705 * Matches a GRE header.
707 struct rte_flow_item_gre {
709 * Checksum (1b), reserved 0 (12b), version (3b).
712 rte_be16_t c_rsvd0_ver;
713 rte_be16_t protocol; /**< Protocol type. */
716 /** Default mask for RTE_FLOW_ITEM_TYPE_GRE. */
718 static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
719 .protocol = RTE_BE16(0xffff),
724 * RTE_FLOW_ITEM_TYPE_FUZZY
726 * Fuzzy pattern match, expected to be faster than the default.
728 * This is for devices that support the fuzzy match option.
729 * Fuzzy matching is usually fast, but at the cost of accuracy,
730 * e.g. a signature match only compares the pattern's hash value, so it is
731 * possible for two different patterns to have the same hash value.
733 * The matching accuracy level can be configured through the threshold.
734 * The driver can divide the threshold range and map it to the different
735 * accuracy levels the device supports.
737 * Threshold 0 means perfect match (no fuzziness), while threshold
738 * 0xffffffff means fuzziest match.
740 struct rte_flow_item_fuzzy {
741 uint32_t thresh; /**< Accuracy threshold. */
744 /** Default mask for RTE_FLOW_ITEM_TYPE_FUZZY. */
746 static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
747 .thresh = 0xffffffff,
752 * RTE_FLOW_ITEM_TYPE_GTP.
754 * Matches a GTPv1 header.
756 struct rte_flow_item_gtp {
758 * Version (3b), protocol type (1b), reserved (1b),
759 * Extension header flag (1b),
760 * Sequence number flag (1b),
761 * N-PDU number flag (1b).
763 uint8_t v_pt_rsv_flags;
764 uint8_t msg_type; /**< Message type. */
765 rte_be16_t msg_len; /**< Message length. */
766 rte_be32_t teid; /**< Tunnel endpoint identifier. */
769 /** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
771 static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
772 .teid = RTE_BE32(0xffffffff),
777 * RTE_FLOW_ITEM_TYPE_ESP
779 * Matches an ESP header.
781 struct rte_flow_item_esp {
782 struct esp_hdr hdr; /**< ESP header definition. */
785 /** Default mask for RTE_FLOW_ITEM_TYPE_ESP. */
787 static const struct rte_flow_item_esp rte_flow_item_esp_mask = {
795 * RTE_FLOW_ITEM_TYPE_GENEVE.
797 * Matches a GENEVE header.
799 struct rte_flow_item_geneve {
801 * Version (2b), length of the options fields (6b), OAM packet (1b),
802 * critical options present (1b), reserved 0 (6b).
804 rte_be16_t ver_opt_len_o_c_rsvd0;
805 rte_be16_t protocol; /**< Protocol type. */
806 uint8_t vni[3]; /**< Virtual Network Identifier. */
807 uint8_t rsvd1; /**< Reserved, normally 0x00. */
810 /** Default mask for RTE_FLOW_ITEM_TYPE_GENEVE. */
812 static const struct rte_flow_item_geneve rte_flow_item_geneve_mask = {
813 .vni = "\xff\xff\xff",
818 * Matching pattern item definition.
820 * A pattern is formed by stacking items starting from the lowest protocol
821 * layer to match. This stacking restriction does not apply to meta items
822 * which can be placed anywhere in the stack without affecting the meaning
823 * of the resulting pattern.
825 * Patterns are terminated by END items.
827 * The spec field should be a valid pointer to a structure of the related
828 * item type. It may remain unspecified (NULL) in many cases to request
829 * broad (nonspecific) matching. In such cases, last and mask must also be set to NULL.
832 * Optionally, last can point to a structure of the same type to define an
833 * inclusive range. This is mostly supported by integer and address fields,
834 * may cause errors otherwise. Fields that do not support ranges must be set
835 * to 0 or to the same value as the corresponding fields in spec.
837 * Only the fields defined to nonzero values in the default masks (see
838 * rte_flow_item_{name}_mask constants) are considered relevant by
839 * default. This can be overridden by providing a mask structure of the
840 * same type with applicable bits set to one. It can also be used to
841 * partially filter out specific fields (e.g. as an alternate mean to match
842 * ranges of IP addresses).
844 * Mask is a simple bit-mask applied before interpreting the contents of
845 * spec and last, which may yield unexpected results if not used
846 * carefully. For example, if for an IPv4 address field, spec provides
847 * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
848 * effective range becomes 10.1.0.0 to 10.3.255.255.
850 struct rte_flow_item {
851 enum rte_flow_item_type type; /**< Item type. */
852 const void *spec; /**< Pointer to item specification structure. */
853 const void *last; /**< Defines an inclusive range (spec to last). */
854 const void *mask; /**< Bit-mask applied to spec and last. */
860 * Each possible action is represented by a type. Some have associated
861 * configuration structures. Several actions combined in a list can be
862 * assigned to a flow rule. That list is not ordered.
864 * They fall in three categories:
866 * - Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
867 * processing matched packets by subsequent flow rules, unless overridden with PASSTHRU.
870 * - Non terminating actions (PASSTHRU, DUP) that leave matched packets up
871 * for additional processing by subsequent flow rules.
873 * - Other non terminating meta actions that do not affect the fate of
874 * packets (END, VOID, MARK, FLAG, COUNT).
876 * When several actions are combined in a flow rule, they should all have
877 * different types (e.g. dropping a packet twice is not possible).
879 * Only the last action of a given type is taken into account. PMDs still
880 * perform error checking on the entire list.
882 * Note that PASSTHRU is the only action able to override a terminating rule.
885 enum rte_flow_action_type {
889 * End marker for action lists. Prevents further processing of
890 * actions, thereby ending the list.
892 * No associated configuration structure.
894 RTE_FLOW_ACTION_TYPE_END,
899 * Used as a placeholder for convenience. It is ignored and simply discarded.
902 * No associated configuration structure.
904 RTE_FLOW_ACTION_TYPE_VOID,
907 * Leaves packets up for additional processing by subsequent flow
908 * rules. This is the default when a rule does not contain a
909 * terminating action, but can be specified to force a rule to
910 * become non-terminating.
912 * No associated configuration structure.
914 RTE_FLOW_ACTION_TYPE_PASSTHRU,
919 * Attaches an integer value to packets and sets PKT_RX_FDIR and
920 * PKT_RX_FDIR_ID mbuf flags.
922 * See struct rte_flow_action_mark.
924 RTE_FLOW_ACTION_TYPE_MARK,
929 * Flags packets. Similar to MARK without a specific value; only
930 * sets the PKT_RX_FDIR mbuf flag.
932 * No associated configuration structure.
934 RTE_FLOW_ACTION_TYPE_FLAG,
937 * Assigns packets to a given queue index.
939 * See struct rte_flow_action_queue.
941 RTE_FLOW_ACTION_TYPE_QUEUE,
946 * PASSTHRU overrides this action if both are specified.
948 * No associated configuration structure.
950 RTE_FLOW_ACTION_TYPE_DROP,
955 * Enables counters for this rule.
957 * These counters can be retrieved and reset through rte_flow_query(),
958 * see struct rte_flow_query_count.
960 * No associated configuration structure.
962 RTE_FLOW_ACTION_TYPE_COUNT,
965 * Duplicates packets to a given queue index.
967 * This is normally combined with QUEUE, however when used alone, it
968 * is actually similar to QUEUE + PASSTHRU.
970 * See struct rte_flow_action_dup.
972 RTE_FLOW_ACTION_TYPE_DUP,
975 * Similar to QUEUE, except RSS is additionally performed on packets
976 * to spread them among several queues according to the provided parameters.
979 * See struct rte_flow_action_rss.
981 RTE_FLOW_ACTION_TYPE_RSS,
984 * Redirects packets to the physical function (PF) of the current device.
987 * No associated configuration structure.
989 RTE_FLOW_ACTION_TYPE_PF,
992 * Redirects packets to the virtual function (VF) of the current
993 * device with the specified ID.
995 * See struct rte_flow_action_vf.
997 RTE_FLOW_ACTION_TYPE_VF,
1000 * Traffic metering and policing (MTR).
1002 * See struct rte_flow_action_meter.
1003 * See file rte_mtr.h for MTR object configuration.
1005 RTE_FLOW_ACTION_TYPE_METER,
1008 * Redirects packets to security engine of current device for security
1009 * processing as specified by security session.
1011 * See struct rte_flow_action_security.
1013 RTE_FLOW_ACTION_TYPE_SECURITY
1017 * RTE_FLOW_ACTION_TYPE_MARK
1019 * Attaches an integer value to packets and sets PKT_RX_FDIR and
1020 * PKT_RX_FDIR_ID mbuf flags.
1022 * This value is arbitrary and application-defined. Maximum allowed value
1023 * depends on the underlying implementation. It is returned in the
1024 * hash.fdir.hi mbuf field.
1026 struct rte_flow_action_mark {
1027 uint32_t id; /**< Integer value to return with packets. */
1031 * RTE_FLOW_ACTION_TYPE_QUEUE
1033 * Assign packets to a given queue index.
1035 * Terminating by default.
1037 struct rte_flow_action_queue {
1038 uint16_t index; /**< Queue index to use. */
1042 * RTE_FLOW_ACTION_TYPE_COUNT (query)
1044 * Query structure to retrieve and reset flow rule counters.
1046 struct rte_flow_query_count {
1047 uint32_t reset:1; /**< Reset counters after query [in]. */
1048 uint32_t hits_set:1; /**< hits field is set [out]. */
1049 uint32_t bytes_set:1; /**< bytes field is set [out]. */
1050 uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
1051 uint64_t hits; /**< Number of hits for this rule [out]. */
1052 uint64_t bytes; /**< Number of bytes through this rule [out]. */
1056 * RTE_FLOW_ACTION_TYPE_DUP
1058 * Duplicates packets to a given queue index.
1060 * This is normally combined with QUEUE, however when used alone, it is
1061 * actually similar to QUEUE + PASSTHRU.
1063 * Non-terminating by default.
1065 struct rte_flow_action_dup {
1066 uint16_t index; /**< Queue index to duplicate packets to. */
1070 * RTE_FLOW_ACTION_TYPE_RSS
1072 * Similar to QUEUE, except RSS is additionally performed on packets to
1073 * spread them among several queues according to the provided parameters.
1075 * Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
1076 * hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
1077 * both can be requested simultaneously.
1079 * Terminating by default.
1081 struct rte_flow_action_rss {
1082 const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
1083 uint16_t num; /**< Number of entries in queue[]. */
1084 uint16_t queue[]; /**< Queues indices to use. */
1088 * RTE_FLOW_ACTION_TYPE_VF
1090 * Redirects packets to a virtual function (VF) of the current device.
1092 * Packets matched by a VF pattern item can be redirected to their original
1093 * VF ID instead of the specified one. This parameter may not be available
1094 * and is not guaranteed to work properly if the VF part is matched by a
1095 * prior flow rule or if packets are not addressed to a VF in the first place.
1098 * Terminating by default.
1100 struct rte_flow_action_vf {
1101 uint32_t original:1; /**< Use original VF ID if possible. */
1102 uint32_t reserved:31; /**< Reserved, must be zero. */
1103 uint32_t id; /**< VF ID to redirect packets to. */
1107 * RTE_FLOW_ACTION_TYPE_METER
1109 * Traffic metering and policing (MTR).
1111 * Packets matched by items of this type can be either dropped or passed to the
1112 * next item with their color set by the MTR object.
1114 * Non-terminating by default.
1116 struct rte_flow_action_meter {
1117 uint32_t mtr_id; /**< MTR object ID created with rte_mtr_create(). */
1121 * RTE_FLOW_ACTION_TYPE_SECURITY
1123 * Perform the security action on flows matched by the pattern items
1124 * according to the configuration of the security session.
1126 * This action modifies the payload of matched flows. For INLINE_CRYPTO, the
1127 * security protocol headers and IV are fully provided by the application as
1128 * specified in the flow pattern. The payload of matching packets is
1129 * encrypted on egress, and decrypted and authenticated on ingress.
1130 * For INLINE_PROTOCOL, the security protocol is fully offloaded to HW,
1131 * providing full encapsulation and decapsulation of packets in security
1132 * protocols. The flow pattern specifies both the outer security header fields
1133 * and the inner packet fields. The security session specified in the action
1134 * must match the pattern parameters.
1136 * The security session specified in the action must be created on the same
1137 * port as the flow action that is being specified.
1139 * The ingress/egress flow attribute should match that specified in the
1140 * security session if the security session supports the definition of the direction.
1143 * Multiple flows can be configured to use the same security session.
1145 * Non-terminating by default.
1147 struct rte_flow_action_security {
1148 void *security_session; /**< Pointer to security session structure. */
1152 * Definition of a single action.
1154 * A list of actions is terminated by a END action.
1156 * For simple actions without a configuration structure, conf remains NULL.
1158 struct rte_flow_action {
1159 enum rte_flow_action_type type; /**< Action type. */
1160 const void *conf; /**< Pointer to action configuration structure. */
1164 * Opaque type returned after successfully creating a flow.
1166 * This handle can be used to manage and query the related flow (e.g. to
1167 * destroy it or retrieve counters).
1172 * Verbose error types.
1174 * Most of them provide the type of the object referenced by struct
1175 * rte_flow_error.cause.
1177 enum rte_flow_error_type {
1178 RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
1179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
1180 RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
1181 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
1182 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
1183 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
1184 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
1185 RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
1186 RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
1187 RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
1188 RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
1189 RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
1193 * Verbose error structure definition.
1195 * This object is normally allocated by applications and set by PMDs, the
1196 * message points to a constant string which does not need to be freed by
1197 * the application, however its pointer can be considered valid only as long
1198 * as its associated DPDK port remains configured. Closing the underlying
1199 * device or unloading the PMD invalidates it.
1201 * Both cause and message may be NULL regardless of the error type.
1203 struct rte_flow_error {
1204 enum rte_flow_error_type type; /**< Cause field and error types. */
1205 const void *cause; /**< Object responsible for the error. */
1206 const char *message; /**< Human-readable error message. */
1210 * Check whether a flow rule can be created on a given port.
1212 * The flow rule is validated for correctness and whether it could be accepted
1213 * by the device given sufficient resources. The rule is checked against the
1214 * current device mode and queue configuration. The flow rule may also
1215 * optionally be validated against existing flow rules and device resources.
1216 * This function has no effect on the target device.
1218 * The returned value is guaranteed to remain valid only as long as no
1219 * successful calls to rte_flow_create() or rte_flow_destroy() are made in
1220 * the meantime and no device parameters affecting flow rules in any way are
1221 * modified, due to possible collisions or resource limitations (although in
1222 * such cases EINVAL should not be returned).
1225 * Port identifier of Ethernet device.
1227 * Flow rule attributes.
1228 * @param[in] pattern
1229 * Pattern specification (list terminated by the END pattern item).
1230 * @param[in] actions
1231 * Associated actions (list terminated by the END action).
1233 * Perform verbose error reporting if not NULL. PMDs initialize this
1234 * structure in case of error only.
1237 * 0 if flow rule is valid and can be created. A negative errno value
1238 * otherwise (rte_errno is also set), the following errors are defined:
1240 * -ENOSYS: underlying device does not support this functionality.
1242 * -EIO: underlying device is removed.
1244 * -EINVAL: unknown or invalid rule specification.
1246 * -ENOTSUP: valid but unsupported rule specification (e.g. partial
1247 * bit-masks are unsupported).
1249 * -EEXIST: collision with an existing rule. Only returned if device
1250 * supports flow rule collision checking and there was a flow rule
1251 * collision. Not receiving this return code is no guarantee that creating
1252 * the rule will not fail due to a collision.
1254 * -ENOMEM: not enough memory to execute the function, or if the device
1255 * supports resource validation, resource limitation on the device.
1257 * -EBUSY: action cannot be performed due to busy device resources, may
1258 * succeed if the affected queues or even the entire port are in a stopped
1259 * state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
1262 rte_flow_validate(uint16_t port_id,
1263 const struct rte_flow_attr *attr,
1264 const struct rte_flow_item pattern[],
1265 const struct rte_flow_action actions[],
1266 struct rte_flow_error *error);
1269 * Create a flow rule on a given port.
1272 * Port identifier of Ethernet device.
1274 * Flow rule attributes.
1275 * @param[in] pattern
1276 * Pattern specification (list terminated by the END pattern item).
1277 * @param[in] actions
1278 * Associated actions (list terminated by the END action).
1280 * Perform verbose error reporting if not NULL. PMDs initialize this
1281 * structure in case of error only.
1284 * A valid handle in case of success, NULL otherwise and rte_errno is set
1285 * to the positive version of one of the error codes defined for
1286 * rte_flow_validate().
1289 rte_flow_create(uint16_t port_id,
1290 const struct rte_flow_attr *attr,
1291 const struct rte_flow_item pattern[],
1292 const struct rte_flow_action actions[],
1293 struct rte_flow_error *error);
1296 * Destroy a flow rule on a given port.
1298 * Failure to destroy a flow rule handle may occur when other flow rules
1299 * depend on it, and destroying it would result in an inconsistent state.
1301 * This function is only guaranteed to succeed if handles are destroyed in
1302 * reverse order of their creation.
1305 * Port identifier of Ethernet device.
1307 * Flow rule handle to destroy.
1309 * Perform verbose error reporting if not NULL. PMDs initialize this
1310 * structure in case of error only.
1313 * 0 on success, a negative errno value otherwise and rte_errno is set.
1316 rte_flow_destroy(uint16_t port_id,
1317 struct rte_flow *flow,
1318 struct rte_flow_error *error);
1321 * Destroy all flow rules associated with a port.
1323 * In the unlikely event of failure, handles are still considered destroyed
1324 * and no longer valid but the port must be assumed to be in an inconsistent
1328 * Port identifier of Ethernet device.
1330 * Perform verbose error reporting if not NULL. PMDs initialize this
1331 * structure in case of error only.
1334 * 0 on success, a negative errno value otherwise and rte_errno is set.
1337 rte_flow_flush(uint16_t port_id,
1338 struct rte_flow_error *error);
1341 * Query an existing flow rule.
1343 * This function allows retrieving flow-specific data such as counters.
1344 * Data is gathered by special actions which must be present in the flow
1347 * \see RTE_FLOW_ACTION_TYPE_COUNT
1350 * Port identifier of Ethernet device.
1352 * Flow rule handle to query.
1354 * Action type to query.
1355 * @param[in, out] data
1356 * Pointer to storage for the associated query data type.
1358 * Perform verbose error reporting if not NULL. PMDs initialize this
1359 * structure in case of error only.
1362 * 0 on success, a negative errno value otherwise and rte_errno is set.
1365 rte_flow_query(uint16_t port_id,
1366 struct rte_flow *flow,
1367 enum rte_flow_action_type action,
1369 struct rte_flow_error *error);
1372 * Restrict ingress traffic to the defined flow rules.
1374 * Isolated mode guarantees that all ingress traffic comes from defined flow
1375 * rules only (current and future).
1377 * Besides making ingress more deterministic, it allows PMDs to safely reuse
1378 * resources otherwise assigned to handle the remaining traffic, such as
1379 * global RSS configuration settings, VLAN filters, MAC address entries,
1380 * legacy filter API rules and so on in order to expand the set of possible
1383 * Calling this function as soon as possible after device initialization,
1384 * ideally before the first call to rte_eth_dev_configure(), is recommended
1385 * to avoid possible failures due to conflicting settings.
1387 * Once effective, leaving isolated mode may not be possible depending on
1388 * PMD implementation.
1390 * Additionally, the following functionality has no effect on the underlying
1391 * port and may return errors such as ENOTSUP ("not supported"):
1393 * - Toggling promiscuous mode.
1394 * - Toggling allmulticast mode.
1395 * - Configuring MAC addresses.
1396 * - Configuring multicast addresses.
1397 * - Configuring VLAN filters.
1398 * - Configuring Rx filters through the legacy API (e.g. FDIR).
1399 * - Configuring global RSS settings.
1402 * Port identifier of Ethernet device.
1404 * Nonzero to enter isolated mode, attempt to leave it otherwise.
1406 * Perform verbose error reporting if not NULL. PMDs initialize this
1407 * structure in case of error only.
1410 * 0 on success, a negative errno value otherwise and rte_errno is set.
1413 rte_flow_isolate(uint16_t port_id, int set, struct rte_flow_error *error);
1416 * Initialize flow error structure.
1419 * Pointer to flow error structure (may be NULL).
1421 * Related error code (rte_errno).
1423 * Cause field and error types.
1425 * Object responsible for the error.
1427 * Human-readable error message.
1430 * Negative error code (errno value) and rte_errno is set.
1433 rte_flow_error_set(struct rte_flow_error *error,
1435 enum rte_flow_error_type type,
1437 const char *message);
1440 * Generic flow representation.
1442 * This form is sufficient to describe an rte_flow independently from any
1443 * PMD implementation and allows for replayability and identification.
1445 struct rte_flow_desc {
1446 size_t size; /**< Allocated space including data[]. */
1447 struct rte_flow_attr attr; /**< Attributes. */
1448 struct rte_flow_item *items; /**< Items. */
1449 struct rte_flow_action *actions; /**< Actions. */
1450 uint8_t data[]; /**< Storage for items/actions. */
1454 * Copy an rte_flow rule description.
1457 * Flow rule description.
1459 * Total size of allocated data for the flow description.
1461 * Flow rule attributes.
1463 * Pattern specification (list terminated by the END pattern item).
1464 * @param[in] actions
1465 * Associated actions (list terminated by the END action).
1468 * If len is greater or equal to the size of the flow, the total size of the
1469 * flow description and its data.
1470 * If len is lower than the size of the flow, the number of bytes that would
1471 * have been written to desc had it been sufficient. Nothing is written.
1474 rte_flow_copy(struct rte_flow_desc *fd, size_t len,
1475 const struct rte_flow_attr *attr,
1476 const struct rte_flow_item *items,
1477 const struct rte_flow_action *actions);
1483 #endif /* RTE_FLOW_H_ */