1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
11 * RTE generic flow API
13 * This interface provides the ability to program packet matching and
14 * associated actions in hardware through flow rules.
21 #include <rte_ether.h>
27 #include <rte_byteorder.h>
35 * Flow rule attributes.
37 * Priorities are set on two levels: per group and per rule within groups.
39 * Lower values denote higher priority, the highest priority for both levels
40 * is 0, so that a rule with priority 0 in group 8 is always matched after a
41 * rule with priority 8 in group 0.
43 * Although optional, applications are encouraged to group similar rules as
44 * much as possible to fully take advantage of hardware capabilities
45 * (e.g. optimized matching) and work around limitations (e.g. a single
46 * pattern type possibly allowed in a given group).
48 * Group and priority levels are arbitrary and up to the application, they
49 * do not need to be contiguous nor start from 0, however the maximum number
50 * varies between devices and may be affected by existing flow rules.
52 * If a packet is matched by several rules of a given group for a given
53 * priority level, the outcome is undefined. It can take any path, may be
54 * duplicated or even cause unrecoverable errors.
56 * Note that support for more than a single group and priority level is not
59 * Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
61 * Several pattern items and actions are valid and can be used in both
62 * directions. Those valid for only one direction are described as such.
64 * At least one direction must be specified.
66 * Specifying both directions at once for a given rule is not recommended
67 * but may be valid in a few cases (e.g. shared counter).
69 struct rte_flow_attr {
70 uint32_t group; /**< Priority group. */
71 uint32_t priority; /**< Priority level within group. */
72 uint32_t ingress:1; /**< Rule applies to ingress traffic. */
73 uint32_t egress:1; /**< Rule applies to egress traffic. */
74 uint32_t reserved:30; /**< Reserved, must be zero. */
78 * Matching pattern item types.
80 * Pattern items fall in two categories:
82 * - Matching protocol headers and packet data, usually associated with a
83 * specification structure. These must be stacked in the same order as the
84 * protocol layers to match inside packets, starting from the lowest.
86 * - Matching meta-data or affecting pattern processing, often without a
87 * specification structure. Since they do not match packet contents, their
88 * position in the list is usually not relevant.
90 * See the description of individual types for more information. Those
91 * marked with [META] fall into the second category.
93 enum rte_flow_item_type {
97 * End marker for item lists. Prevents further processing of items,
98 * thereby ending the pattern.
100 * No associated specification structure.
102 RTE_FLOW_ITEM_TYPE_END,
107 * Used as a placeholder for convenience. It is ignored and simply
110 * No associated specification structure.
112 RTE_FLOW_ITEM_TYPE_VOID,
117 * Inverted matching, i.e. process packets that do not match the
120 * No associated specification structure.
122 RTE_FLOW_ITEM_TYPE_INVERT,
125 * Matches any protocol in place of the current layer, a single ANY
126 * may also stand for several protocol layers.
128 * See struct rte_flow_item_any.
130 RTE_FLOW_ITEM_TYPE_ANY,
135 * Matches packets addressed to the physical function of the device.
137 * If the underlying device function differs from the one that would
138 * normally receive the matched traffic, specifying this item
139 * prevents it from reaching that device unless the flow rule
140 * contains a PF action. Packets are not duplicated between device
141 * instances by default.
143 * No associated specification structure.
145 RTE_FLOW_ITEM_TYPE_PF,
150 * Matches packets addressed to a virtual function ID of the device.
152 * If the underlying device function differs from the one that would
153 * normally receive the matched traffic, specifying this item
154 * prevents it from reaching that device unless the flow rule
155 * contains a VF action. Packets are not duplicated between device
156 * instances by default.
158 * See struct rte_flow_item_vf.
160 RTE_FLOW_ITEM_TYPE_VF,
165 * Matches packets coming from the specified physical port of the
168 * The first PORT item overrides the physical port normally
169 * associated with the specified DPDK input port (port_id). This
170 * item can be provided several times to match additional physical
173 * See struct rte_flow_item_port.
175 RTE_FLOW_ITEM_TYPE_PORT,
178 * Matches a byte string of a given length at a given offset.
180 * See struct rte_flow_item_raw.
182 RTE_FLOW_ITEM_TYPE_RAW,
185 * Matches an Ethernet header.
187 * See struct rte_flow_item_eth.
189 RTE_FLOW_ITEM_TYPE_ETH,
192 * Matches an 802.1Q/ad VLAN tag.
194 * See struct rte_flow_item_vlan.
196 RTE_FLOW_ITEM_TYPE_VLAN,
199 * Matches an IPv4 header.
201 * See struct rte_flow_item_ipv4.
203 RTE_FLOW_ITEM_TYPE_IPV4,
206 * Matches an IPv6 header.
208 * See struct rte_flow_item_ipv6.
210 RTE_FLOW_ITEM_TYPE_IPV6,
213 * Matches an ICMP header.
215 * See struct rte_flow_item_icmp.
217 RTE_FLOW_ITEM_TYPE_ICMP,
220 * Matches a UDP header.
222 * See struct rte_flow_item_udp.
224 RTE_FLOW_ITEM_TYPE_UDP,
227 * Matches a TCP header.
229 * See struct rte_flow_item_tcp.
231 RTE_FLOW_ITEM_TYPE_TCP,
234 * Matches a SCTP header.
236 * See struct rte_flow_item_sctp.
238 RTE_FLOW_ITEM_TYPE_SCTP,
241 * Matches a VXLAN header.
243 * See struct rte_flow_item_vxlan.
245 RTE_FLOW_ITEM_TYPE_VXLAN,
248 * Matches an E_TAG header.
250 * See struct rte_flow_item_e_tag.
252 RTE_FLOW_ITEM_TYPE_E_TAG,
255 * Matches an NVGRE header.
257 * See struct rte_flow_item_nvgre.
259 RTE_FLOW_ITEM_TYPE_NVGRE,
262 * Matches an MPLS header.
264 * See struct rte_flow_item_mpls.
266 RTE_FLOW_ITEM_TYPE_MPLS,
269 * Matches a GRE header.
271 * See struct rte_flow_item_gre.
273 RTE_FLOW_ITEM_TYPE_GRE,
278 * Fuzzy pattern match, expected to be faster than the default.
280 * This is for devices that support the fuzzy matching option.
281 * Usually fuzzy matching is fast but the cost is accuracy.
283 * See struct rte_flow_item_fuzzy.
285 RTE_FLOW_ITEM_TYPE_FUZZY,
288 * Matches a GTP header.
290 * Configure flow for GTP packets.
292 * See struct rte_flow_item_gtp.
294 RTE_FLOW_ITEM_TYPE_GTP,
297 * Matches a GTP header.
299 * Configure flow for GTP-C packets.
301 * See struct rte_flow_item_gtp.
303 RTE_FLOW_ITEM_TYPE_GTPC,
306 * Matches a GTP header.
308 * Configure flow for GTP-U packets.
310 * See struct rte_flow_item_gtp.
312 RTE_FLOW_ITEM_TYPE_GTPU,
315 * Matches an ESP header.
317 * See struct rte_flow_item_esp.
319 RTE_FLOW_ITEM_TYPE_ESP,
322 * Matches a GENEVE header.
324 * See struct rte_flow_item_geneve.
326 RTE_FLOW_ITEM_TYPE_GENEVE,
330 * RTE_FLOW_ITEM_TYPE_ANY
332 * Matches any protocol in place of the current layer, a single ANY may also
333 * stand for several protocol layers.
335 * This is usually specified as the first pattern item when looking for a
336 * protocol anywhere in a packet.
338 * A zeroed mask stands for any number of layers.
340 struct rte_flow_item_any {
341 uint32_t num; /**< Number of layers covered. */
344 /** Default mask for RTE_FLOW_ITEM_TYPE_ANY. */
346 static const struct rte_flow_item_any rte_flow_item_any_mask = {
352 * RTE_FLOW_ITEM_TYPE_VF
354 * Matches packets addressed to a virtual function ID of the device.
356 * If the underlying device function differs from the one that would
357 * normally receive the matched traffic, specifying this item prevents it
358 * from reaching that device unless the flow rule contains a VF
359 * action. Packets are not duplicated between device instances by default.
361 * - Likely to return an error or never match any traffic if this causes a
362 * VF device to match traffic addressed to a different VF.
363 * - Can be specified multiple times to match traffic addressed to several
365 * - Can be combined with a PF item to match both PF and VF traffic.
367 * A zeroed mask can be used to match any VF ID.
369 struct rte_flow_item_vf {
370 uint32_t id; /**< Destination VF ID. */
373 /** Default mask for RTE_FLOW_ITEM_TYPE_VF. */
375 static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
381 * RTE_FLOW_ITEM_TYPE_PORT
383 * Matches packets coming from the specified physical port of the underlying
386 * The first PORT item overrides the physical port normally associated with
387 * the specified DPDK input port (port_id). This item can be provided
388 * several times to match additional physical ports.
390 * Note that physical ports are not necessarily tied to DPDK input ports
391 * (port_id) when those are not under DPDK control. Possible values are
392 * specific to each device, they are not necessarily indexed from zero and
393 * may not be contiguous.
395 * As a device property, the list of allowed values as well as the value
396 * associated with a port_id should be retrieved by other means.
398 * A zeroed mask can be used to match any port index.
400 struct rte_flow_item_port {
401 uint32_t index; /**< Physical port index. */
404 /** Default mask for RTE_FLOW_ITEM_TYPE_PORT. */
406 static const struct rte_flow_item_port rte_flow_item_port_mask = {
412 * RTE_FLOW_ITEM_TYPE_RAW
414 * Matches a byte string of a given length at a given offset.
416 * Offset is either absolute (using the start of the packet) or relative to
417 * the end of the previous matched item in the stack, in which case negative
418 * values are allowed.
420 * If search is enabled, offset is used as the starting point. The search
421 * area can be delimited by setting limit to a nonzero value, which is the
422 * maximum number of bytes after offset where the pattern may start.
424 * Matching a zero-length pattern is allowed, doing so resets the relative
425 * offset for subsequent items.
427 * This type does not support ranges (struct rte_flow_item.last).
429 struct rte_flow_item_raw {
430 uint32_t relative:1; /**< Look for pattern after the previous item. */
431 uint32_t search:1; /**< Search pattern from offset (see also limit). */
432 uint32_t reserved:30; /**< Reserved, must be set to zero. */
433 int32_t offset; /**< Absolute or relative offset for pattern. */
434 uint16_t limit; /**< Search area limit for start of pattern. */
435 uint16_t length; /**< Pattern length. */
436 const uint8_t *pattern; /**< Byte string to look for. */
439 /** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */
441 static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
444 .reserved = 0x3fffffff,
445 .offset = 0xffffffff,
453 * RTE_FLOW_ITEM_TYPE_ETH
455 * Matches an Ethernet header.
457 struct rte_flow_item_eth {
458 struct ether_addr dst; /**< Destination MAC. */
459 struct ether_addr src; /**< Source MAC. */
460 rte_be16_t type; /**< EtherType. */
463 /** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
465 static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
466 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
467 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
468 .type = RTE_BE16(0x0000),
473 * RTE_FLOW_ITEM_TYPE_VLAN
475 * Matches an 802.1Q/ad VLAN tag.
477 * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or
478 * RTE_FLOW_ITEM_TYPE_VLAN.
480 struct rte_flow_item_vlan {
481 rte_be16_t tpid; /**< Tag protocol identifier. */
482 rte_be16_t tci; /**< Tag control information. */
485 /** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
487 static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
488 .tpid = RTE_BE16(0x0000),
489 .tci = RTE_BE16(0xffff),
494 * RTE_FLOW_ITEM_TYPE_IPV4
496 * Matches an IPv4 header.
498 * Note: IPv4 options are handled by dedicated pattern items.
500 struct rte_flow_item_ipv4 {
501 struct ipv4_hdr hdr; /**< IPv4 header definition. */
504 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */
506 static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
508 .src_addr = RTE_BE32(0xffffffff),
509 .dst_addr = RTE_BE32(0xffffffff),
515 * RTE_FLOW_ITEM_TYPE_IPV6.
517 * Matches an IPv6 header.
519 * Note: IPv6 options are handled by dedicated pattern items.
521 struct rte_flow_item_ipv6 {
522 struct ipv6_hdr hdr; /**< IPv6 header definition. */
525 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */
527 static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
530 "\xff\xff\xff\xff\xff\xff\xff\xff"
531 "\xff\xff\xff\xff\xff\xff\xff\xff",
533 "\xff\xff\xff\xff\xff\xff\xff\xff"
534 "\xff\xff\xff\xff\xff\xff\xff\xff",
540 * RTE_FLOW_ITEM_TYPE_ICMP.
542 * Matches an ICMP header.
544 struct rte_flow_item_icmp {
545 struct icmp_hdr hdr; /**< ICMP header definition. */
548 /** Default mask for RTE_FLOW_ITEM_TYPE_ICMP. */
550 static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
559 * RTE_FLOW_ITEM_TYPE_UDP.
561 * Matches a UDP header.
563 struct rte_flow_item_udp {
564 struct udp_hdr hdr; /**< UDP header definition. */
567 /** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */
569 static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
571 .src_port = RTE_BE16(0xffff),
572 .dst_port = RTE_BE16(0xffff),
578 * RTE_FLOW_ITEM_TYPE_TCP.
580 * Matches a TCP header.
582 struct rte_flow_item_tcp {
583 struct tcp_hdr hdr; /**< TCP header definition. */
586 /** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */
588 static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
590 .src_port = RTE_BE16(0xffff),
591 .dst_port = RTE_BE16(0xffff),
597 * RTE_FLOW_ITEM_TYPE_SCTP.
599 * Matches a SCTP header.
601 struct rte_flow_item_sctp {
602 struct sctp_hdr hdr; /**< SCTP header definition. */
605 /** Default mask for RTE_FLOW_ITEM_TYPE_SCTP. */
607 static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
609 .src_port = RTE_BE16(0xffff),
610 .dst_port = RTE_BE16(0xffff),
616 * RTE_FLOW_ITEM_TYPE_VXLAN.
618 * Matches a VXLAN header (RFC 7348).
620 struct rte_flow_item_vxlan {
621 uint8_t flags; /**< Normally 0x08 (I flag). */
622 uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
623 uint8_t vni[3]; /**< VXLAN identifier. */
624 uint8_t rsvd1; /**< Reserved, normally 0x00. */
627 /** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN. */
629 static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
630 .vni = "\xff\xff\xff",
635 * RTE_FLOW_ITEM_TYPE_E_TAG.
637 * Matches an E-tag header.
639 struct rte_flow_item_e_tag {
640 rte_be16_t tpid; /**< Tag protocol identifier (0x893F). */
642 * E-Tag control information (E-TCI).
643 * E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
645 rte_be16_t epcp_edei_in_ecid_b;
646 /** Reserved (2b), GRP (2b), E-CID base (12b). */
647 rte_be16_t rsvd_grp_ecid_b;
648 uint8_t in_ecid_e; /**< Ingress E-CID ext. */
649 uint8_t ecid_e; /**< E-CID ext. */
652 /** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
654 static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
655 .rsvd_grp_ecid_b = RTE_BE16(0x3fff),
660 * RTE_FLOW_ITEM_TYPE_NVGRE.
662 * Matches an NVGRE header.
664 struct rte_flow_item_nvgre {
666 * Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
667 * reserved 0 (9b), version (3b).
669 * c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
671 rte_be16_t c_k_s_rsvd0_ver;
672 rte_be16_t protocol; /**< Protocol type (0x6558). */
673 uint8_t tni[3]; /**< Virtual subnet ID. */
674 uint8_t flow_id; /**< Flow ID. */
677 /** Default mask for RTE_FLOW_ITEM_TYPE_NVGRE. */
679 static const struct rte_flow_item_nvgre rte_flow_item_nvgre_mask = {
680 .tni = "\xff\xff\xff",
685 * RTE_FLOW_ITEM_TYPE_MPLS.
687 * Matches an MPLS header.
689 struct rte_flow_item_mpls {
691 * Label (20b), TC (3b), Bottom of Stack (1b).
693 uint8_t label_tc_s[3];
694 uint8_t ttl; /**< Time-to-Live. */
697 /** Default mask for RTE_FLOW_ITEM_TYPE_MPLS. */
699 static const struct rte_flow_item_mpls rte_flow_item_mpls_mask = {
700 .label_tc_s = "\xff\xff\xf0",
705 * RTE_FLOW_ITEM_TYPE_GRE.
707 * Matches a GRE header.
709 struct rte_flow_item_gre {
711 * Checksum (1b), reserved 0 (12b), version (3b).
714 rte_be16_t c_rsvd0_ver;
715 rte_be16_t protocol; /**< Protocol type. */
718 /** Default mask for RTE_FLOW_ITEM_TYPE_GRE. */
720 static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
721 .protocol = RTE_BE16(0xffff),
726 * RTE_FLOW_ITEM_TYPE_FUZZY
728 * Fuzzy pattern match, expected to be faster than the default.
730 * This is for devices that support the fuzzy match option.
731 * Usually a fuzzy match is fast but the cost is accuracy.
732 * i.e. Signature Match only matches the pattern's hash value, but it is
733 * possible for two different patterns to have the same hash value.
735 * The matching accuracy level can be configured by the threshold.
736 * The driver can divide the range of threshold values and map them to the
737 * different accuracy levels that the device supports.
739 * Threshold 0 means perfect match (no fuzziness), while threshold
740 * 0xffffffff means fuzziest match.
742 struct rte_flow_item_fuzzy {
743 uint32_t thresh; /**< Accuracy threshold. */
746 /** Default mask for RTE_FLOW_ITEM_TYPE_FUZZY. */
748 static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
749 .thresh = 0xffffffff,
754 * RTE_FLOW_ITEM_TYPE_GTP.
756 * Matches a GTPv1 header.
758 struct rte_flow_item_gtp {
760 * Version (3b), protocol type (1b), reserved (1b),
761 * Extension header flag (1b),
762 * Sequence number flag (1b),
763 * N-PDU number flag (1b).
765 uint8_t v_pt_rsv_flags;
766 uint8_t msg_type; /**< Message type. */
767 rte_be16_t msg_len; /**< Message length. */
768 rte_be32_t teid; /**< Tunnel endpoint identifier. */
771 /** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
773 static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
774 .teid = RTE_BE32(0xffffffff),
779 * RTE_FLOW_ITEM_TYPE_ESP
781 * Matches an ESP header.
783 struct rte_flow_item_esp {
784 struct esp_hdr hdr; /**< ESP header definition. */
787 /** Default mask for RTE_FLOW_ITEM_TYPE_ESP. */
789 static const struct rte_flow_item_esp rte_flow_item_esp_mask = {
797 * RTE_FLOW_ITEM_TYPE_GENEVE.
799 * Matches a GENEVE header.
801 struct rte_flow_item_geneve {
803 * Version (2b), length of the options fields (6b), OAM packet (1b),
804 * critical options present (1b), reserved 0 (6b).
806 rte_be16_t ver_opt_len_o_c_rsvd0;
807 rte_be16_t protocol; /**< Protocol type. */
808 uint8_t vni[3]; /**< Virtual Network Identifier. */
809 uint8_t rsvd1; /**< Reserved, normally 0x00. */
812 /** Default mask for RTE_FLOW_ITEM_TYPE_GENEVE. */
814 static const struct rte_flow_item_geneve rte_flow_item_geneve_mask = {
815 .vni = "\xff\xff\xff",
820 * Matching pattern item definition.
822 * A pattern is formed by stacking items starting from the lowest protocol
823 * layer to match. This stacking restriction does not apply to meta items
824 * which can be placed anywhere in the stack without affecting the meaning
825 * of the resulting pattern.
827 * Patterns are terminated by END items.
829 * The spec field should be a valid pointer to a structure of the related
830 * item type. It may remain unspecified (NULL) in many cases to request
831 * broad (nonspecific) matching. In such cases, last and mask must also be
834 * Optionally, last can point to a structure of the same type to define an
835 * inclusive range. This is mostly supported by integer and address fields,
836 * may cause errors otherwise. Fields that do not support ranges must be set
837 * to 0 or to the same value as the corresponding fields in spec.
839 * Only the fields defined to nonzero values in the default masks (see
840 * rte_flow_item_{name}_mask constants) are considered relevant by
841 * default. This can be overridden by providing a mask structure of the
842 * same type with applicable bits set to one. It can also be used to
843 * partially filter out specific fields (e.g. as an alternate mean to match
844 * ranges of IP addresses).
846 * Mask is a simple bit-mask applied before interpreting the contents of
847 * spec and last, which may yield unexpected results if not used
848 * carefully. For example, if for an IPv4 address field, spec provides
849 * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
850 * effective range becomes 10.1.0.0 to 10.3.255.255.
852 struct rte_flow_item {
853 enum rte_flow_item_type type; /**< Item type. */
854 const void *spec; /**< Pointer to item specification structure. */
855 const void *last; /**< Defines an inclusive range (spec to last). */
856 const void *mask; /**< Bit-mask applied to spec and last. */
862 * Each possible action is represented by a type. Some have associated
863 * configuration structures. Several actions combined in a list can be
864 * assigned to a flow rule and are performed in order.
866 * They fall in three categories:
868 * - Actions that modify the fate of matching traffic, for instance by
869 * dropping or assigning it a specific destination.
871 * - Actions that modify matching traffic contents or its properties. This
872 * includes adding/removing encapsulation, encryption, compression and
875 * - Actions related to the flow rule itself, such as updating counters or
876 * making it non-terminating.
878 * Flow rules being terminating by default, not specifying any action of the
879 * fate kind results in undefined behavior. This applies to both ingress and
882 * PASSTHRU, when supported, makes a flow rule non-terminating.
884 enum rte_flow_action_type {
886 * End marker for action lists. Prevents further processing of
887 * actions, thereby ending the list.
889 * No associated configuration structure.
891 RTE_FLOW_ACTION_TYPE_END,
894 * Used as a placeholder for convenience. It is ignored and simply
897 * No associated configuration structure.
899 RTE_FLOW_ACTION_TYPE_VOID,
902 * Leaves traffic up for additional processing by subsequent flow
903 * rules; makes a flow rule non-terminating.
905 * No associated configuration structure.
907 RTE_FLOW_ACTION_TYPE_PASSTHRU,
910 * Attaches an integer value to packets and sets PKT_RX_FDIR and
911 * PKT_RX_FDIR_ID mbuf flags.
913 * See struct rte_flow_action_mark.
915 RTE_FLOW_ACTION_TYPE_MARK,
918 * Flags packets. Similar to MARK without a specific value; only
919 * sets the PKT_RX_FDIR mbuf flag.
921 * No associated configuration structure.
923 RTE_FLOW_ACTION_TYPE_FLAG,
926 * Assigns packets to a given queue index.
928 * See struct rte_flow_action_queue.
930 RTE_FLOW_ACTION_TYPE_QUEUE,
935 * PASSTHRU overrides this action if both are specified.
937 * No associated configuration structure.
939 RTE_FLOW_ACTION_TYPE_DROP,
942 * Enables counters for this flow rule.
944 * These counters can be retrieved and reset through rte_flow_query(),
945 * see struct rte_flow_query_count.
947 * No associated configuration structure.
949 RTE_FLOW_ACTION_TYPE_COUNT,
952 * Similar to QUEUE, except RSS is additionally performed on packets
953 * to spread them among several queues according to the provided
956 * See struct rte_flow_action_rss.
958 RTE_FLOW_ACTION_TYPE_RSS,
961 * Redirects packets to the physical function (PF) of the current
964 * No associated configuration structure.
966 RTE_FLOW_ACTION_TYPE_PF,
969 * Redirects packets to the virtual function (VF) of the current
970 * device with the specified ID.
972 * See struct rte_flow_action_vf.
974 RTE_FLOW_ACTION_TYPE_VF,
977 * Traffic metering and policing (MTR).
979 * See struct rte_flow_action_meter.
980 * See file rte_mtr.h for MTR object configuration.
982 RTE_FLOW_ACTION_TYPE_METER,
985 * Redirects packets to security engine of current device for security
986 * processing as specified by security session.
988 * See struct rte_flow_action_security.
990 RTE_FLOW_ACTION_TYPE_SECURITY
994 * RTE_FLOW_ACTION_TYPE_MARK
996 * Attaches an integer value to packets and sets PKT_RX_FDIR and
997 * PKT_RX_FDIR_ID mbuf flags.
999 * This value is arbitrary and application-defined. Maximum allowed value
1000 * depends on the underlying implementation. It is returned in the
1001 * hash.fdir.hi mbuf field.
1003 struct rte_flow_action_mark {
1004 uint32_t id; /**< Integer value to return with packets. */
1008 * RTE_FLOW_ACTION_TYPE_QUEUE
1010 * Assign packets to a given queue index.
1012 struct rte_flow_action_queue {
1013 uint16_t index; /**< Queue index to use. */
1017 * RTE_FLOW_ACTION_TYPE_COUNT (query)
1019 * Query structure to retrieve and reset flow rule counters.
1021 struct rte_flow_query_count {
1022 uint32_t reset:1; /**< Reset counters after query [in]. */
1023 uint32_t hits_set:1; /**< hits field is set [out]. */
1024 uint32_t bytes_set:1; /**< bytes field is set [out]. */
1025 uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
1026 uint64_t hits; /**< Number of hits for this rule [out]. */
1027 uint64_t bytes; /**< Number of bytes through this rule [out]. */
1031 * RTE_FLOW_ACTION_TYPE_RSS
1033 * Similar to QUEUE, except RSS is additionally performed on packets to
1034 * spread them among several queues according to the provided parameters.
1036 * Unlike global RSS settings used by other DPDK APIs, unsetting the
1037 * @p types field does not disable RSS in a flow rule. Doing so instead
1038 * requests safe unspecified "best-effort" settings from the underlying PMD,
1039 * which depending on the flow rule, may result in anything ranging from
1040 * empty (single queue) to all-inclusive RSS.
1042 * Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
1043 * hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
1044 * both can be requested simultaneously.
1046 struct rte_flow_action_rss {
1047 uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
1048 uint32_t key_len; /**< Hash key length in bytes. */
1049 uint32_t queue_num; /**< Number of entries in @p queue. */
1050 const uint8_t *key; /**< Hash key. */
1051 const uint16_t *queue; /**< Queue indices to use. */
1055 * RTE_FLOW_ACTION_TYPE_VF
1057 * Redirects packets to a virtual function (VF) of the current device.
1059 * Packets matched by a VF pattern item can be redirected to their original
1060 * VF ID instead of the specified one. This parameter may not be available
1061 * and is not guaranteed to work properly if the VF part is matched by a
1062 * prior flow rule or if packets are not addressed to a VF in the first
1065 struct rte_flow_action_vf {
1066 uint32_t original:1; /**< Use original VF ID if possible. */
1067 uint32_t reserved:31; /**< Reserved, must be zero. */
1068 uint32_t id; /**< VF ID to redirect packets to. */
1072 * RTE_FLOW_ACTION_TYPE_METER
1074 * Traffic metering and policing (MTR).
1076 * Packets matched by items of this type can be either dropped or passed to the
1077 * next item with their color set by the MTR object.
1079 struct rte_flow_action_meter {
1080 uint32_t mtr_id; /**< MTR object ID created with rte_mtr_create(). */
1084 * RTE_FLOW_ACTION_TYPE_SECURITY
1086 * Perform the security action on flows matched by the pattern items
1087 * according to the configuration of the security session.
1089 * This action modifies the payload of matched flows. For INLINE_CRYPTO, the
1090 * security protocol headers and IV are fully provided by the application as
1091 * specified in the flow pattern. The payload of matching packets is
1092 * encrypted on egress, and decrypted and authenticated on ingress.
1093 * For INLINE_PROTOCOL, the security protocol is fully offloaded to HW,
1094 * providing full encapsulation and decapsulation of packets in security
1095 * protocols. The flow pattern specifies both the outer security header fields
1096 * and the inner packet fields. The security session specified in the action
1097 * must match the pattern parameters.
1099 * The security session specified in the action must be created on the same
1100 * port as the flow action that is being specified.
1102 * The ingress/egress flow attribute should match that specified in the
1103 * security session if the security session supports the definition of the
1106 * Multiple flows can be configured to use the same security session.
1108 struct rte_flow_action_security {
1109 void *security_session; /**< Pointer to security session structure. */
1113 * Definition of a single action.
1115 * A list of actions is terminated by a END action.
1117 * For simple actions without a configuration structure, conf remains NULL.
1119 struct rte_flow_action {
1120 enum rte_flow_action_type type; /**< Action type. */
1121 const void *conf; /**< Pointer to action configuration structure. */
1125 * Opaque type returned after successfully creating a flow.
1127 * This handle can be used to manage and query the related flow (e.g. to
1128 * destroy it or retrieve counters).
1133 * Verbose error types.
1135 * Most of them provide the type of the object referenced by struct
1136 * rte_flow_error.cause.
1138 enum rte_flow_error_type {
1139 RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
1140 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
1141 RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
1142 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
1143 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
1144 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
1145 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
1146 RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
1147 RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
1148 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, /**< Item specification. */
1149 RTE_FLOW_ERROR_TYPE_ITEM_LAST, /**< Item specification range. */
1150 RTE_FLOW_ERROR_TYPE_ITEM_MASK, /**< Item specification mask. */
1151 RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
1152 RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
1153 RTE_FLOW_ERROR_TYPE_ACTION_CONF, /**< Action configuration. */
1154 RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
1158 * Verbose error structure definition.
1160 * This object is normally allocated by applications and set by PMDs, the
1161 * message points to a constant string which does not need to be freed by
1162 * the application, however its pointer can be considered valid only as long
1163 * as its associated DPDK port remains configured. Closing the underlying
1164 * device or unloading the PMD invalidates it.
1166 * Both cause and message may be NULL regardless of the error type.
1168 struct rte_flow_error {
1169 enum rte_flow_error_type type; /**< Cause field and error types. */
1170 const void *cause; /**< Object responsible for the error. */
1171 const char *message; /**< Human-readable error message. */
1175 * Check whether a flow rule can be created on a given port.
1177 * The flow rule is validated for correctness and whether it could be accepted
1178 * by the device given sufficient resources. The rule is checked against the
1179 * current device mode and queue configuration. The flow rule may also
1180 * optionally be validated against existing flow rules and device resources.
1181 * This function has no effect on the target device.
1183 * The returned value is guaranteed to remain valid only as long as no
1184 * successful calls to rte_flow_create() or rte_flow_destroy() are made in
1185 * the meantime and no device parameter affecting flow rules in any way are
1186 * modified, due to possible collisions or resource limitations (although in
1187 * such cases EINVAL should not be returned).
1190 * Port identifier of Ethernet device.
1192 * Flow rule attributes.
1193 * @param[in] pattern
1194 * Pattern specification (list terminated by the END pattern item).
1195 * @param[in] actions
1196 * Associated actions (list terminated by the END action).
1198 * Perform verbose error reporting if not NULL. PMDs initialize this
1199 * structure in case of error only.
1202 * 0 if flow rule is valid and can be created. A negative errno value
1203 * otherwise (rte_errno is also set), the following errors are defined:
1205 * -ENOSYS: underlying device does not support this functionality.
1207 * -EIO: underlying device is removed.
1209 * -EINVAL: unknown or invalid rule specification.
1211 * -ENOTSUP: valid but unsupported rule specification (e.g. partial
1212 * bit-masks are unsupported).
1214 * -EEXIST: collision with an existing rule. Only returned if device
1215 * supports flow rule collision checking and there was a flow rule
1216 * collision. Not receiving this return code is no guarantee that creating
1217 * the rule will not fail due to a collision.
1219 * -ENOMEM: not enough memory to execute the function, or if the device
1220 * supports resource validation, resource limitation on the device.
1222 * -EBUSY: action cannot be performed due to busy device resources, may
1223 * succeed if the affected queues or even the entire port are in a stopped
1224 * state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
1227 rte_flow_validate(uint16_t port_id,
1228 const struct rte_flow_attr *attr,
1229 const struct rte_flow_item pattern[],
1230 const struct rte_flow_action actions[],
1231 struct rte_flow_error *error);
1234 * Create a flow rule on a given port.
1237 * Port identifier of Ethernet device.
1239 * Flow rule attributes.
1240 * @param[in] pattern
1241 * Pattern specification (list terminated by the END pattern item).
1242 * @param[in] actions
1243 * Associated actions (list terminated by the END action).
1245 * Perform verbose error reporting if not NULL. PMDs initialize this
1246 * structure in case of error only.
1249 * A valid handle in case of success, NULL otherwise and rte_errno is set
1250 * to the positive version of one of the error codes defined for
1251 * rte_flow_validate().
1254 rte_flow_create(uint16_t port_id,
1255 const struct rte_flow_attr *attr,
1256 const struct rte_flow_item pattern[],
1257 const struct rte_flow_action actions[],
1258 struct rte_flow_error *error);
/**
 * Destroy a flow rule on a given port.
 *
 * Failure to destroy a flow rule handle may occur when other flow rules
 * depend on it, and destroying it would result in an inconsistent state.
 *
 * This function is only guaranteed to succeed if handles are destroyed in
 * reverse order of their creation.
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param flow
 *   Flow rule handle to destroy.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error);
/**
 * Destroy all flow rules associated with a port.
 *
 * In the unlikely event of failure, handles are still considered destroyed
 * and no longer valid but the port must be assumed to be in an inconsistent
 * state.
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error);
/**
 * Query an existing flow rule.
 *
 * This function allows retrieving flow-specific data such as counters.
 * Data is gathered by special actions which must be present in the flow
 * rule definition.
 *
 * \see RTE_FLOW_ACTION_TYPE_COUNT
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param flow
 *   Flow rule handle to query.
 * @param action
 *   Action type to query.
 * @param[in, out] data
 *   Pointer to storage for the associated query data type.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       enum rte_flow_action_type action,
	       void *data,
	       struct rte_flow_error *error);
/**
 * Restrict ingress traffic to the defined flow rules.
 *
 * Isolated mode guarantees that all ingress traffic comes from defined flow
 * rules only (current and future).
 *
 * Besides making ingress more deterministic, it allows PMDs to safely reuse
 * resources otherwise assigned to handle the remaining traffic, such as
 * global RSS configuration settings, VLAN filters, MAC address entries,
 * legacy filter API rules and so on in order to expand the set of possible
 * flow rule types.
 *
 * Calling this function as soon as possible after device initialization,
 * ideally before the first call to rte_eth_dev_configure(), is recommended
 * to avoid possible failures due to conflicting settings.
 *
 * Once effective, leaving isolated mode may not be possible depending on
 * PMD implementation.
 *
 * Additionally, the following functionality has no effect on the underlying
 * port and may return errors such as ENOTSUP ("not supported"):
 *
 * - Toggling promiscuous mode.
 * - Toggling allmulticast mode.
 * - Configuring MAC addresses.
 * - Configuring multicast addresses.
 * - Configuring VLAN filters.
 * - Configuring Rx filters through the legacy API (e.g. FDIR).
 * - Configuring global RSS settings.
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param set
 *   Nonzero to enter isolated mode, attempt to leave it otherwise.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_isolate(uint16_t port_id, int set, struct rte_flow_error *error);
/**
 * Initialize flow error structure.
 *
 * @param[out] error
 *   Pointer to flow error structure (may be NULL).
 * @param code
 *   Related error code (rte_errno).
 * @param type
 *   Cause field and error types.
 * @param cause
 *   Object responsible for the error.
 * @param message
 *   Human-readable error message.
 *
 * @return
 *   Negative error code (errno value) and rte_errno is set.
 */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message);
1405 * Generic flow representation.
1407 * This form is sufficient to describe an rte_flow independently from any
1408 * PMD implementation and allows for replayability and identification.
1410 struct rte_flow_desc {
1411 size_t size; /**< Allocated space including data[]. */
1412 struct rte_flow_attr attr; /**< Attributes. */
1413 struct rte_flow_item *items; /**< Items. */
1414 struct rte_flow_action *actions; /**< Actions. */
1415 uint8_t data[]; /**< Storage for items/actions. */
/**
 * Copy an rte_flow rule description.
 *
 * @param[in] fd
 *   Flow rule description.
 * @param[in] len
 *   Total size of allocated data for the flow description.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 *
 * @return
 *   If len is greater or equal to the size of the flow, the total size of the
 *   flow description and its data.
 *   If len is lower than the size of the flow, the number of bytes that would
 *   have been written to desc had it been sufficient. Nothing is written.
 */
size_t
rte_flow_copy(struct rte_flow_desc *fd, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions);
1448 #endif /* RTE_FLOW_H_ */