4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * RTE generic flow API
41 * This interface provides the ability to program packet matching and
42 * associated actions in hardware through flow rules.
46 #include <rte_ether.h>
58 * Flow rule attributes.
60 * Priorities are set on two levels: per group and per rule within groups.
62 * Lower values denote higher priority, the highest priority for both levels
63 * is 0, so that a rule with priority 0 in group 8 is always matched after a
64 * rule with priority 8 in group 0.
66 * Although optional, applications are encouraged to group similar rules as
67 * much as possible to fully take advantage of hardware capabilities
68 * (e.g. optimized matching) and work around limitations (e.g. a single
69 * pattern type possibly allowed in a given group).
71 * Group and priority levels are arbitrary and up to the application, they
72 * do not need to be contiguous nor start from 0, however the maximum number
73 * varies between devices and may be affected by existing flow rules.
75 * If a packet is matched by several rules of a given group for a given
76 * priority level, the outcome is undefined. It can take any path, may be
77 * duplicated or even cause unrecoverable errors.
79 * Note that support for more than a single group and priority level is not
82 * Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
84 * Several pattern items and actions are valid and can be used in both
85 * directions. Those valid for only one direction are described as such.
87 * At least one direction must be specified.
89 * Specifying both directions at once for a given rule is not recommended
90 * but may be valid in a few cases (e.g. shared counter).
92 struct rte_flow_attr {
93 uint32_t group; /**< Priority group (lower value = higher priority). */
94 uint32_t priority; /**< Priority level within group (0 = highest). */
95 uint32_t ingress:1; /**< Rule applies to ingress traffic. */
96 uint32_t egress:1; /**< Rule applies to egress traffic. */
97 uint32_t reserved:30; /**< Reserved, must be zero. */
101 * Matching pattern item types.
103 * Pattern items fall in two categories:
105 * - Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
106 * IPV6, ICMP, UDP, TCP, SCTP, VXLAN and so on), usually associated with a
107 * specification structure. These must be stacked in the same order as the
108 * protocol layers to match, starting from the lowest.
110 * - Matching meta-data or affecting pattern processing (END, VOID, INVERT,
111 * PF, VF, PORT and so on), often without a specification structure. Since
112 * they do not match packet contents, these can be specified anywhere
113 * within item lists without affecting others.
115 * See the description of individual types for more information. Those
116 * marked with [META] fall into the second category.
118 enum rte_flow_item_type {
122 * End marker for item lists. Prevents further processing of items,
123 * thereby ending the pattern.
125 * No associated specification structure.
127 RTE_FLOW_ITEM_TYPE_END, /* [META] */
132 * Used as a placeholder for convenience. It is ignored and simply
135 * No associated specification structure.
137 RTE_FLOW_ITEM_TYPE_VOID, /* [META] */
142 * Inverted matching, i.e. process packets that do not match the
145 * No associated specification structure.
147 RTE_FLOW_ITEM_TYPE_INVERT, /* [META] */
150 * Matches any protocol in place of the current layer, a single ANY
151 * may also stand for several protocol layers.
153 * See struct rte_flow_item_any.
155 RTE_FLOW_ITEM_TYPE_ANY,
160 * Matches packets addressed to the physical function of the device.
162 * If the underlying device function differs from the one that would
163 * normally receive the matched traffic, specifying this item
164 * prevents it from reaching that device unless the flow rule
165 * contains a PF action. Packets are not duplicated between device
166 * instances by default.
168 * No associated specification structure.
170 RTE_FLOW_ITEM_TYPE_PF, /* [META] */
175 * Matches packets addressed to a virtual function ID of the device.
177 * If the underlying device function differs from the one that would
178 * normally receive the matched traffic, specifying this item
179 * prevents it from reaching that device unless the flow rule
180 * contains a VF action. Packets are not duplicated between device
181 * instances by default.
183 * See struct rte_flow_item_vf.
185 RTE_FLOW_ITEM_TYPE_VF, /* [META] */
190 * Matches packets coming from the specified physical port of the
193 * The first PORT item overrides the physical port normally
194 * associated with the specified DPDK input port (port_id). This
195 * item can be provided several times to match additional physical
198 * See struct rte_flow_item_port.
200 RTE_FLOW_ITEM_TYPE_PORT, /* [META] */
203 * Matches a byte string of a given length at a given offset.
205 * See struct rte_flow_item_raw.
207 RTE_FLOW_ITEM_TYPE_RAW,
210 * Matches an Ethernet header.
212 * See struct rte_flow_item_eth.
214 RTE_FLOW_ITEM_TYPE_ETH,
217 * Matches an 802.1Q/ad VLAN tag.
219 * See struct rte_flow_item_vlan.
221 RTE_FLOW_ITEM_TYPE_VLAN,
224 * Matches an IPv4 header.
226 * See struct rte_flow_item_ipv4.
228 RTE_FLOW_ITEM_TYPE_IPV4,
231 * Matches an IPv6 header.
233 * See struct rte_flow_item_ipv6.
235 RTE_FLOW_ITEM_TYPE_IPV6,
238 * Matches an ICMP header.
240 * See struct rte_flow_item_icmp.
242 RTE_FLOW_ITEM_TYPE_ICMP,
245 * Matches a UDP header.
247 * See struct rte_flow_item_udp.
249 RTE_FLOW_ITEM_TYPE_UDP,
252 * Matches a TCP header.
254 * See struct rte_flow_item_tcp.
256 RTE_FLOW_ITEM_TYPE_TCP,
259 * Matches a SCTP header.
261 * See struct rte_flow_item_sctp.
263 RTE_FLOW_ITEM_TYPE_SCTP,
266 * Matches a VXLAN header.
268 * See struct rte_flow_item_vxlan.
270 RTE_FLOW_ITEM_TYPE_VXLAN,
274 * RTE_FLOW_ITEM_TYPE_ANY
276 * Matches any protocol in place of the current layer, a single ANY may also
277 * stand for several protocol layers.
279 * This is usually specified as the first pattern item when looking for a
280 * protocol anywhere in a packet.
282 * A zeroed mask stands for any number of layers.
284 struct rte_flow_item_any {
285 uint32_t num; /**< Number of layers covered. A zeroed mask matches any number. */
288 /** Default mask for RTE_FLOW_ITEM_TYPE_ANY. */
289 static const struct rte_flow_item_any rte_flow_item_any_mask = {
294 * RTE_FLOW_ITEM_TYPE_VF
296 * Matches packets addressed to a virtual function ID of the device.
298 * If the underlying device function differs from the one that would
299 * normally receive the matched traffic, specifying this item prevents it
300 * from reaching that device unless the flow rule contains a VF
301 * action. Packets are not duplicated between device instances by default.
303 * - Likely to return an error or never match any traffic if this causes a
304 * VF device to match traffic addressed to a different VF.
305 * - Can be specified multiple times to match traffic addressed to several
307 * - Can be combined with a PF item to match both PF and VF traffic.
309 * A zeroed mask can be used to match any VF ID.
311 struct rte_flow_item_vf {
312 uint32_t id; /**< Destination VF ID. A zeroed mask matches any VF ID. */
315 /** Default mask for RTE_FLOW_ITEM_TYPE_VF. */
316 static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
321 * RTE_FLOW_ITEM_TYPE_PORT
323 * Matches packets coming from the specified physical port of the underlying
326 * The first PORT item overrides the physical port normally associated with
327 * the specified DPDK input port (port_id). This item can be provided
328 * several times to match additional physical ports.
330 * Note that physical ports are not necessarily tied to DPDK input ports
331 * (port_id) when those are not under DPDK control. Possible values are
332 * specific to each device, they are not necessarily indexed from zero and
333 * may not be contiguous.
335 * As a device property, the list of allowed values as well as the value
336 * associated with a port_id should be retrieved by other means.
338 * A zeroed mask can be used to match any port index.
340 struct rte_flow_item_port {
341 uint32_t index; /**< Physical port index. A zeroed mask matches any index. */
344 /** Default mask for RTE_FLOW_ITEM_TYPE_PORT. */
345 static const struct rte_flow_item_port rte_flow_item_port_mask = {
350 * RTE_FLOW_ITEM_TYPE_RAW
352 * Matches a byte string of a given length at a given offset.
354 * Offset is either absolute (using the start of the packet) or relative to
355 * the end of the previous matched item in the stack, in which case negative
356 * values are allowed.
358 * If search is enabled, offset is used as the starting point. The search
359 * area can be delimited by setting limit to a nonzero value, which is the
360 * maximum number of bytes after offset where the pattern may start.
362 * Matching a zero-length pattern is allowed, doing so resets the relative
363 * offset for subsequent items.
365 * This type does not support ranges (struct rte_flow_item.last).
367 struct rte_flow_item_raw {
368 uint32_t relative:1; /**< Look for pattern after the previous item. */
369 uint32_t search:1; /**< Search pattern from offset (see also limit). */
370 uint32_t reserved:30; /**< Reserved, must be set to zero. */
371 int32_t offset; /**< Absolute or relative offset for pattern. Signed: negative values allowed when relative. */
372 uint16_t limit; /**< Search area limit for start of pattern. */
373 uint16_t length; /**< Pattern length. */
374 uint8_t pattern[]; /**< Byte string to look for. */
377 /** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */
378 static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
381 .reserved = 0x3fffffff, /* all 30 reserved bits set */
382 .offset = 0xffffffff, /* all offset bits set (int32_t -1) */
388 * RTE_FLOW_ITEM_TYPE_ETH
390 * Matches an Ethernet header.
392 struct rte_flow_item_eth {
393 struct ether_addr dst; /**< Destination MAC. */
394 struct ether_addr src; /**< Source MAC. */
395 uint16_t type; /**< EtherType. */
398 /** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
399 static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
400 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", /* match full destination MAC */
401 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", /* match full source MAC */
406 * RTE_FLOW_ITEM_TYPE_VLAN
408 * Matches an 802.1Q/ad VLAN tag.
410 * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or
411 * RTE_FLOW_ITEM_TYPE_VLAN.
413 struct rte_flow_item_vlan {
414 uint16_t tpid; /**< Tag protocol identifier. */
415 uint16_t tci; /**< Tag control information. */
418 /** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
419 static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
425 * RTE_FLOW_ITEM_TYPE_IPV4
427 * Matches an IPv4 header.
429 * Note: IPv4 options are handled by dedicated pattern items.
431 struct rte_flow_item_ipv4 {
432 struct ipv4_hdr hdr; /**< IPv4 header definition. */
435 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */
436 static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
438 .src_addr = 0xffffffff, /* match full source address */
439 .dst_addr = 0xffffffff, /* match full destination address */
444 * RTE_FLOW_ITEM_TYPE_IPV6.
446 * Matches an IPv6 header.
448 * Note: IPv6 options are handled by dedicated pattern items.
450 struct rte_flow_item_ipv6 {
451 struct ipv6_hdr hdr; /**< IPv6 header definition. */
454 /** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */
455 static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
458 "\xff\xff\xff\xff\xff\xff\xff\xff"
459 "\xff\xff\xff\xff\xff\xff\xff\xff",
461 "\xff\xff\xff\xff\xff\xff\xff\xff"
462 "\xff\xff\xff\xff\xff\xff\xff\xff",
467 * RTE_FLOW_ITEM_TYPE_ICMP.
469 * Matches an ICMP header.
471 struct rte_flow_item_icmp {
472 struct icmp_hdr hdr; /**< ICMP header definition. */
475 /** Default mask for RTE_FLOW_ITEM_TYPE_ICMP. */
476 static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
484 * RTE_FLOW_ITEM_TYPE_UDP.
486 * Matches a UDP header.
488 struct rte_flow_item_udp {
489 struct udp_hdr hdr; /**< UDP header definition. */
492 /** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */
493 static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
501 * RTE_FLOW_ITEM_TYPE_TCP.
503 * Matches a TCP header.
505 struct rte_flow_item_tcp {
506 struct tcp_hdr hdr; /**< TCP header definition. */
509 /** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */
510 static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
518 * RTE_FLOW_ITEM_TYPE_SCTP.
520 * Matches a SCTP header.
522 struct rte_flow_item_sctp {
523 struct sctp_hdr hdr; /**< SCTP header definition. */
526 /** Default mask for RTE_FLOW_ITEM_TYPE_SCTP. */
527 static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
535 * RTE_FLOW_ITEM_TYPE_VXLAN.
537 * Matches a VXLAN header (RFC 7348).
539 struct rte_flow_item_vxlan {
540 uint8_t flags; /**< Normally 0x08 (I flag). */
541 uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
542 uint8_t vni[3]; /**< VXLAN identifier. */
543 uint8_t rsvd1; /**< Reserved, normally 0x00. */
546 /** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN. */
547 static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
548 .vni = "\xff\xff\xff", /* match the full 24-bit VXLAN identifier */
552 * Matching pattern item definition.
554 * A pattern is formed by stacking items starting from the lowest protocol
555 * layer to match. This stacking restriction does not apply to meta items
556 * which can be placed anywhere in the stack without affecting the meaning
557 * of the resulting pattern.
559 * Patterns are terminated by END items.
561 * The spec field should be a valid pointer to a structure of the related
562 * item type. It may remain unspecified (NULL) in many cases to request
563 * broad (nonspecific) matching. In such cases, last and mask must also be
566 * Optionally, last can point to a structure of the same type to define an
567 * inclusive range. This is mostly supported by integer and address fields,
568 * may cause errors otherwise. Fields that do not support ranges must be set
569 * to 0 or to the same value as the corresponding fields in spec.
571 * Only the fields defined to nonzero values in the default masks (see
572 * rte_flow_item_{name}_mask constants) are considered relevant by
573 * default. This can be overridden by providing a mask structure of the
574 * same type with applicable bits set to one. It can also be used to
575 * partially filter out specific fields (e.g. as an alternate mean to match
576 * ranges of IP addresses).
578 * Mask is a simple bit-mask applied before interpreting the contents of
579 * spec and last, which may yield unexpected results if not used
580 * carefully. For example, if for an IPv4 address field, spec provides
581 * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
582 * effective range becomes 10.1.0.0 to 10.3.255.255.
584 struct rte_flow_item {
585 enum rte_flow_item_type type; /**< Item type. */
586 const void *spec; /**< Pointer to item specification structure. NULL requests broad matching. */
587 const void *last; /**< Defines an inclusive range (spec to last). */
588 const void *mask; /**< Bit-mask applied to spec and last. NULL selects the item's default mask. */
594 * Each possible action is represented by a type. Some have associated
595 * configuration structures. Several actions combined in a list can be
596 * affected to a flow rule. That list is not ordered.
598 * They fall in three categories:
600 * - Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
601 * processing matched packets by subsequent flow rules, unless overridden
604 * - Non terminating actions (PASSTHRU, DUP) that leave matched packets up
605 * for additional processing by subsequent flow rules.
607 * - Other non terminating meta actions that do not affect the fate of
608 * packets (END, VOID, MARK, FLAG, COUNT).
610 * When several actions are combined in a flow rule, they should all have
611 * different types (e.g. dropping a packet twice is not possible).
613 * Only the last action of a given type is taken into account. PMDs still
614 * perform error checking on the entire list.
616 * Note that PASSTHRU is the only action able to override a terminating
619 enum rte_flow_action_type {
623 * End marker for action lists. Prevents further processing of
624 * actions, thereby ending the list.
626 * No associated configuration structure.
628 RTE_FLOW_ACTION_TYPE_END, /* meta */
633 * Used as a placeholder for convenience. It is ignored and simply
636 * No associated configuration structure.
638 RTE_FLOW_ACTION_TYPE_VOID, /* meta */
641 * Leaves packets up for additional processing by subsequent flow
642 * rules. This is the default when a rule does not contain a
643 * terminating action, but can be specified to force a rule to
644 * become non-terminating.
646 * No associated configuration structure.
648 RTE_FLOW_ACTION_TYPE_PASSTHRU, /* non-terminating */
653 * Attaches an integer value to packets and sets PKT_RX_FDIR and
654 * PKT_RX_FDIR_ID mbuf flags.
656 * See struct rte_flow_action_mark.
658 RTE_FLOW_ACTION_TYPE_MARK, /* meta */
663 * Flags packets. Similar to MARK without a specific value; only
664 * sets the PKT_RX_FDIR mbuf flag.
666 * No associated configuration structure.
668 RTE_FLOW_ACTION_TYPE_FLAG, /* meta */
671 * Assigns packets to a given queue index.
673 * See struct rte_flow_action_queue.
675 RTE_FLOW_ACTION_TYPE_QUEUE, /* terminating */
680 * PASSTHRU overrides this action if both are specified.
682 * No associated configuration structure.
684 RTE_FLOW_ACTION_TYPE_DROP, /* terminating */
689 * Enables counters for this rule.
691 * These counters can be retrieved and reset through rte_flow_query(),
692 * see struct rte_flow_query_count.
694 * No associated configuration structure.
696 RTE_FLOW_ACTION_TYPE_COUNT, /* meta */
699 * Duplicates packets to a given queue index.
701 * This is normally combined with QUEUE, however when used alone, it
702 * is actually similar to QUEUE + PASSTHRU.
704 * See struct rte_flow_action_dup.
706 RTE_FLOW_ACTION_TYPE_DUP, /* non-terminating */
709 * Similar to QUEUE, except RSS is additionally performed on packets
710 * to spread them among several queues according to the provided
713 * See struct rte_flow_action_rss.
715 RTE_FLOW_ACTION_TYPE_RSS, /* terminating */
718 * Redirects packets to the physical function (PF) of the current
721 * No associated configuration structure.
723 RTE_FLOW_ACTION_TYPE_PF, /* terminating */
726 * Redirects packets to the virtual function (VF) of the current
727 * device with the specified ID.
729 * See struct rte_flow_action_vf.
731 RTE_FLOW_ACTION_TYPE_VF, /* terminating */
735 * RTE_FLOW_ACTION_TYPE_MARK
737 * Attaches an integer value to packets and sets PKT_RX_FDIR and
738 * PKT_RX_FDIR_ID mbuf flags.
740 * This value is arbitrary and application-defined. Maximum allowed value
741 * depends on the underlying implementation. It is returned in the
742 * hash.fdir.hi mbuf field.
744 struct rte_flow_action_mark {
745 uint32_t id; /**< Integer value to return with packets (in hash.fdir.hi). */
749 * RTE_FLOW_ACTION_TYPE_QUEUE
751 * Assign packets to a given queue index.
753 * Terminating by default.
755 struct rte_flow_action_queue {
756 uint16_t index; /**< Queue index to use. */
760 * RTE_FLOW_ACTION_TYPE_COUNT (query)
762 * Query structure to retrieve and reset flow rule counters.
764 struct rte_flow_query_count {
765 uint32_t reset:1; /**< Reset counters after query [in]. */
766 uint32_t hits_set:1; /**< hits field is set [out]. */
767 uint32_t bytes_set:1; /**< bytes field is set [out]. */
768 uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
769 uint64_t hits; /**< Number of hits for this rule [out]. */
770 uint64_t bytes; /**< Number of bytes through this rule [out]. */
774 * RTE_FLOW_ACTION_TYPE_DUP
776 * Duplicates packets to a given queue index.
778 * This is normally combined with QUEUE, however when used alone, it is
779 * actually similar to QUEUE + PASSTHRU.
781 * Non-terminating by default.
783 struct rte_flow_action_dup {
784 uint16_t index; /**< Queue index to duplicate packets to. */
788 * RTE_FLOW_ACTION_TYPE_RSS
790 * Similar to QUEUE, except RSS is additionally performed on packets to
791 * spread them among several queues according to the provided parameters.
793 * Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
794 * hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
795 * both can be requested simultaneously.
797 * Terminating by default.
799 struct rte_flow_action_rss {
800 const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
801 uint16_t num; /**< Number of entries in queue[]. */
802 uint16_t queue[]; /**< Queues indices to use. Flexible array, sized by num. */
806 * RTE_FLOW_ACTION_TYPE_VF
808 * Redirects packets to a virtual function (VF) of the current device.
810 * Packets matched by a VF pattern item can be redirected to their original
811 * VF ID instead of the specified one. This parameter may not be available
812 * and is not guaranteed to work properly if the VF part is matched by a
813 * prior flow rule or if packets are not addressed to a VF in the first
816 * Terminating by default.
818 struct rte_flow_action_vf {
819 uint32_t original:1; /**< Use original VF ID if possible. */
820 uint32_t reserved:31; /**< Reserved, must be zero. */
821 uint32_t id; /**< VF ID to redirect packets to. */
825 * Definition of a single action.
827 * A list of actions is terminated by a END action.
829 * For simple actions without a configuration structure, conf remains NULL.
831 struct rte_flow_action {
832 enum rte_flow_action_type type; /**< Action type. */
833 const void *conf; /**< Pointer to action configuration structure. NULL for simple actions. */
837 * Opaque type returned after successfully creating a flow.
839 * This handle can be used to manage and query the related flow (e.g. to
840 * destroy it or retrieve counters).
845 * Verbose error types.
847 * Most of them provide the type of the object referenced by struct
848 * rte_flow_error.cause.
850 enum rte_flow_error_type {
851 RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
852 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
853 RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
854 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
855 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
856 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
857 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
858 RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
859 RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
860 RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
861 RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
862 RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
866 * Verbose error structure definition.
868 * This object is normally allocated by applications and set by PMDs, the
869 * message points to a constant string which does not need to be freed by
870 * the application, however its pointer can be considered valid only as long
871 * as its associated DPDK port remains configured. Closing the underlying
872 * device or unloading the PMD invalidates it.
874 * Both cause and message may be NULL regardless of the error type.
876 struct rte_flow_error {
877 enum rte_flow_error_type type; /**< Cause field and error types. */
878 const void *cause; /**< Object responsible for the error. May be NULL. */
879 const char *message; /**< Human-readable error message. May be NULL; PMD-owned, not freed by the application. */
883 * Check whether a flow rule can be created on a given port.
885 * While this function has no effect on the target device, the flow rule is
886 * validated against its current configuration state and the returned value
887 * should be considered valid by the caller for that state only.
889 * The returned value is guaranteed to remain valid only as long as no
890 * successful calls to rte_flow_create() or rte_flow_destroy() are made in
891 * the meantime and no device parameter affecting flow rules in any way are
892 * modified, due to possible collisions or resource limitations (although in
893 * such cases EINVAL should not be returned).
896 * Port identifier of Ethernet device.
898 * Flow rule attributes.
900 * Pattern specification (list terminated by the END pattern item).
902 * Associated actions (list terminated by the END action).
904 * Perform verbose error reporting if not NULL. PMDs initialize this
905 * structure in case of error only.
908 * 0 if flow rule is valid and can be created. A negative errno value
909 * otherwise (rte_errno is also set), the following errors are defined:
911 * -ENOSYS: underlying device does not support this functionality.
913 * -EINVAL: unknown or invalid rule specification.
915 * -ENOTSUP: valid but unsupported rule specification (e.g. partial
916 * bit-masks are unsupported).
918 * -EEXIST: collision with an existing rule.
920 * -ENOMEM: not enough resources.
922 * -EBUSY: action cannot be performed due to busy device resources, may
923 * succeed if the affected queues or even the entire port are in a stopped
924 * state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
927 rte_flow_validate(uint8_t port_id,
928 const struct rte_flow_attr *attr,
929 const struct rte_flow_item pattern[],
930 const struct rte_flow_action actions[],
931 struct rte_flow_error *error);
934 * Create a flow rule on a given port.
937 * Port identifier of Ethernet device.
939 * Flow rule attributes.
941 * Pattern specification (list terminated by the END pattern item).
943 * Associated actions (list terminated by the END action).
945 * Perform verbose error reporting if not NULL. PMDs initialize this
946 * structure in case of error only.
949 * A valid handle in case of success, NULL otherwise and rte_errno is set
950 * to the positive version of one of the error codes defined for
951 * rte_flow_validate().
954 rte_flow_create(uint8_t port_id,
955 const struct rte_flow_attr *attr,
956 const struct rte_flow_item pattern[],
957 const struct rte_flow_action actions[],
958 struct rte_flow_error *error);
961 * Destroy a flow rule on a given port.
963 * Failure to destroy a flow rule handle may occur when other flow rules
964 * depend on it, and destroying it would result in an inconsistent state.
966 * This function is only guaranteed to succeed if handles are destroyed in
967 * reverse order of their creation.
970 * Port identifier of Ethernet device.
972 * Flow rule handle to destroy.
974 * Perform verbose error reporting if not NULL. PMDs initialize this
975 * structure in case of error only.
978 * 0 on success, a negative errno value otherwise and rte_errno is set.
981 rte_flow_destroy(uint8_t port_id,
982 struct rte_flow *flow,
983 struct rte_flow_error *error);
986 * Destroy all flow rules associated with a port.
988 * In the unlikely event of failure, handles are still considered destroyed
989 * and no longer valid but the port must be assumed to be in an inconsistent
993 * Port identifier of Ethernet device.
995 * Perform verbose error reporting if not NULL. PMDs initialize this
996 * structure in case of error only.
999 * 0 on success, a negative errno value otherwise and rte_errno is set.
1002 rte_flow_flush(uint8_t port_id,
1003 struct rte_flow_error *error);
1006 * Query an existing flow rule.
1008 * This function allows retrieving flow-specific data such as counters.
1009 * Data is gathered by special actions which must be present in the flow
1012 * \see RTE_FLOW_ACTION_TYPE_COUNT
1015 * Port identifier of Ethernet device.
1017 * Flow rule handle to query.
1019 * Action type to query.
1020 * @param[in, out] data
1021 * Pointer to storage for the associated query data type.
1023 * Perform verbose error reporting if not NULL. PMDs initialize this
1024 * structure in case of error only.
1027 * 0 on success, a negative errno value otherwise and rte_errno is set.
1030 rte_flow_query(uint8_t port_id,
1031 struct rte_flow *flow,
1032 enum rte_flow_action_type action,
1034 struct rte_flow_error *error);
1040 #endif /* RTE_FLOW_H_ */