#include "mlx5.h"
+/* E-Switch Manager port, used for rte_flow_item_port_id. */
+#define MLX5_PORT_ESW_MGR UINT32_MAX
+
/* Private rte flow items. */
/*
 * Private enumerators start at INT_MIN so they can never collide with the
 * public RTE_FLOW_ITEM_TYPE_* values, which count upward from 0.
 * NOTE(review): MLX5_RTE_FLOW_ACTION_TYPE_* enumerators appear inside the
 * item enum in this view; in the complete header they belong to a separate
 * mlx5_rte_flow_action_type enum -- confirm against the full file.
 */
enum mlx5_rte_flow_item_type {
MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
MLX5_RTE_FLOW_ACTION_TYPE_AGE,
+ MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
+ MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
+ MLX5_RTE_FLOW_ACTION_TYPE_RSS,
};
/* Bit offset of the indirect action type within a 32-bit action ID. */
#define MLX5_INDIRECT_ACTION_TYPE_OFFSET 30
/*
 * Indirect action types, stored in bits 30-31 of the indirect action ID
 * (two bits, so at most four types fit).  Do not reorder the existing
 * enumerators: the values are encoded into action IDs
 * (see MLX5_INDIRECT_ACT_CT_GEN_IDX below).
 */
enum {
MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
+ MLX5_INDIRECT_ACTION_TYPE_COUNT,
+ MLX5_INDIRECT_ACTION_TYPE_CT,
};
+/* At present at most 256 owner ports are supported, with up to 4M CT actions each. */
+/* Maximum number of owner ports encodable in a CT action ID (8 bits). */
+#define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x100
+
+#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22
+#define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)
+
+/* 30-31: type, 22-29: owner port, 0-21: index. */
+/*
+ * NOTE(review): (index) is OR-ed in unmasked, so callers must guarantee
+ * index < 2^22.  Also MLX5_INDIRECT_ACTION_TYPE_CT << 30 shifts into the
+ * sign bit of a signed int -- consider a (uint32_t) cast; TODO confirm.
+ */
+#define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
+ ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
+ (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
+ MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))
+
+/* Extract the owner port (bits 22-29) from a CT action ID. */
+#define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
+ (((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
+ MLX5_INDIRECT_ACT_CT_OWNER_MASK)
+
+/* Extract the per-owner action index (bits 0-21) from a CT action ID. */
+#define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
+ ((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
MLX5_MTR_COLOR,
MLX5_MTR_ID,
MLX5_ASO_FLOW_HIT,
+ MLX5_ASO_CONNTRACK,
+ MLX5_SAMPLE_ID,
};
/* Default queue number. */
#define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
#define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
+/* INTEGRITY item bits */
+#define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
+#define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
+#define MLX5_FLOW_ITEM_INTEGRITY \
+ (MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
+
+/* Conntrack item. */
+#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
+
+/* Flex item */
+#define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
+#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
+
+/* ESP item */
+#define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
- MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
#define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
#define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
#define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
+#define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
+#define MLX5_FLOW_ACTION_CT (1ull << 41)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
- MLX5_FLOW_ACTION_DEFAULT_MISS)
+ MLX5_FLOW_ACTION_DEFAULT_MISS | \
+ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
#define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
- MLX5_FLOW_ACTION_JUMP)
-
+ MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
#define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
MLX5_FLOW_ACTION_SET_IPV4_DST | \
/* Valid layer type for IPV4 RSS. */
#define MLX5_IPV4_LAYER_TYPES \
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_OTHER)
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
/* IBV hash source bits for IPV4. */
#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
/* Valid layer type for IPV6 RSS. */
#define MLX5_IPV6_LAYER_TYPES \
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
/* GTP extension header flag. */
#define MLX5_GTP_EXT_HEADER_FLAG 4
-/* GTP extension header max PDU type value. */
-#define MLX5_GTP_EXT_MAX_PDU_TYPE 15
-
/* GTP extension header PDU type shift. */
#define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4)
#define MLX5_ACT_NUM_MDF_IPV6 4
#define MLX5_ACT_NUM_MDF_MAC 2
#define MLX5_ACT_NUM_MDF_VID 1
-#define MLX5_ACT_NUM_MDF_PORT 2
+#define MLX5_ACT_NUM_MDF_PORT 1
#define MLX5_ACT_NUM_MDF_TTL 1
#define MLX5_ACT_NUM_DEC_TTL MLX5_ACT_NUM_MDF_TTL
#define MLX5_ACT_NUM_MDF_TCPSEQ 1
/* Maximum number of fields to modify in MODIFY_FIELD */
#define MLX5_ACT_MAX_MOD_FIELDS 5
+/* Syndrome bits definition for connection tracking. */
+#define MLX5_CT_SYNDROME_VALID (0x0 << 6)
+#define MLX5_CT_SYNDROME_INVALID (0x1 << 6)
+#define MLX5_CT_SYNDROME_TRAP (0x2 << 6)
+#define MLX5_CT_SYNDROME_STATE_CHANGE (0x1 << 1)
+#define MLX5_CT_SYNDROME_BAD_PACKET (0x1 << 0)
+
enum mlx5_flow_drv_type {
MLX5_FLOW_TYPE_MIN,
MLX5_FLOW_TYPE_DV,
MLX5_FLOW_TYPE_VERBS,
+ MLX5_FLOW_TYPE_HW,
MLX5_FLOW_TYPE_MAX,
};
MLX5_FLOW_FATE_DROP,
MLX5_FLOW_FATE_DEFAULT_MISS,
MLX5_FLOW_FATE_SHARED_RSS,
+ MLX5_FLOW_FATE_MTR,
MLX5_FLOW_FATE_MAX,
};
/* Matcher structure. */
struct mlx5_flow_dv_matcher {
- struct mlx5_cache_entry entry; /**< Pointer to the next element. */
+ struct mlx5_list_entry entry; /**< Pointer to the next element. */
struct mlx5_flow_tbl_resource *tbl;
/**< Pointer to the table(group) the matcher associated with. */
void *matcher_object; /**< Pointer to DV matcher */
/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
/* Pointer to next element. */
uint32_t refcnt; /**< Reference counter. */
void *action;
/* Tag resource structure. */
struct mlx5_flow_dv_tag_resource {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
/**< hash list entry for tag resource, tag value as the key. */
void *action;
/**< Tag action object. */
uint32_t tag_id; /**< Tag ID. */
};
-/*
- * Number of modification commands.
- * The maximal actions amount in FW is some constant, and it is 16 in the
- * latest releases. In some old releases, it will be limited to 8.
- * Since there is no interface to query the capacity, the maximal value should
- * be used to allow PMD to create the flow. The validation will be done in the
- * lower driver layer or FW. A failure will be returned if exceeds the maximal
- * supported actions number on the root table.
- * On non-root tables, there is no limitation, but 32 is enough right now.
- */
-#define MLX5_MAX_MODIFY_NUM 32
-#define MLX5_ROOT_TBL_MODIFY_NUM 16
-
/* Modify resource structure */
struct mlx5_flow_dv_modify_hdr_resource {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
void *action; /**< Modify header action object. */
+ uint32_t idx;
/* Key area for hash list matching: */
uint8_t ft_type; /**< Flow table type, Rx or Tx. */
- uint32_t actions_num; /**< Number of modification actions. */
- uint64_t flags; /**< Flags for RDMA API. */
+ uint8_t actions_num; /**< Number of modification actions. */
+ bool root; /**< Whether action is in root table. */
struct mlx5_modification_cmd actions[];
/**< Modification actions. */
-};
+} __rte_packed;
/* Modify resource key of the hash organization. */
union mlx5_flow_modify_hdr_key {
/* Port ID resource structure. */
struct mlx5_flow_dv_port_id_action_resource {
- struct mlx5_cache_entry entry;
+ struct mlx5_list_entry entry;
void *action; /**< Action object. */
uint32_t port_id; /**< Port ID value. */
uint32_t idx; /**< Indexed pool memory index. */
/* Push VLAN action resource structure */
struct mlx5_flow_dv_push_vlan_action_resource {
- struct mlx5_cache_entry entry; /* Cache entry. */
+ struct mlx5_list_entry entry; /* Cache entry. */
void *action; /**< Action object. */
uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
rte_be32_t vlan_tag; /**< VLAN tag value. */
* - Key is 32/64-bit MARK action ID.
* - MUST be the first entry.
*/
- struct mlx5_hlist_entry hlist_ent;
+ struct mlx5_list_entry hlist_ent;
LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
/* List entry for device flows. */
uint32_t idx;
/* Table data structure of the hash organization. */
struct mlx5_flow_tbl_data_entry {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
/**< hash list entry, 64-bits key inside. */
struct mlx5_flow_tbl_resource tbl;
/**< flow table resource. */
- struct mlx5_cache_list matchers;
+ struct mlx5_list *matchers;
/**< matchers' header associated with the flow table. */
struct mlx5_flow_dv_jump_tbl_resource jump;
/**< jump resource, at most one for each table created. */
const struct mlx5_flow_tunnel *tunnel;
uint32_t group_id;
uint32_t external:1;
- uint32_t tunnel_offload:1; /* Tunnel offlod table or not. */
+ uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
uint32_t is_egress:1; /**< Egress table. */
uint32_t is_transfer:1; /**< Transfer table. */
uint32_t dummy:1; /**< DR table. */
/* Sample action resource structure. */
struct mlx5_flow_dv_sample_resource {
- struct mlx5_cache_entry entry; /**< Cache entry. */
+ struct mlx5_list_entry entry; /**< Cache entry. */
union {
void *verbs_action; /**< Verbs sample action object. */
void **sub_actions; /**< Sample sub-action array. */
/* Destination array action resource structure. */
struct mlx5_flow_dv_dest_array_resource {
- struct mlx5_cache_entry entry; /**< Cache entry. */
+ struct mlx5_list_entry entry; /**< Cache entry. */
uint32_t idx; /** Destination array action object index. */
uint8_t ft_type; /** Flow Table Type */
uint8_t num_of_dest; /**< Number of destination actions. */
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
void *drv_flow; /**< pointer to driver flow object. */
uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
- uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
- uint32_t mark:1; /**< Metadate rxq mark flag. */
+ uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
uint32_t fate_action:3; /**< Fate action type. */
union {
uint32_t rix_hrxq; /**< Hash Rx queue object index. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_flow_handle_dv dvh;
#endif
+ uint8_t flex_item; /**< referenced Flex Item bitmask. */
} __rte_packed;
/*
#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
#endif
-/*
- * Max number of actions per DV flow.
- * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
- * in rdma-core file providers/mlx5/verbs.c.
- */
-#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
-
/** Device flow structure only for DV flow creation. */
struct mlx5_flow_dv_workspace {
uint32_t group; /**< The group index. */
/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 32
+/**
+ * Tunnel offload rule types.
+ */
+enum mlx5_tof_rule_type {
+ MLX5_TUNNEL_OFFLOAD_NONE = 0, /* Not a tunnel offload rule. */
+ MLX5_TUNNEL_OFFLOAD_SET_RULE, /* Tunnel-set (steer) rule. */
+ MLX5_TUNNEL_OFFLOAD_MATCH_RULE, /* Tunnel match rule. */
+ MLX5_TUNNEL_OFFLOAD_MISS_RULE, /* Tunnel miss rule. */
+};
+
/** Device flow structure. */
__extension__
struct mlx5_flow {
struct mlx5_flow_handle *handle;
uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
const struct mlx5_flow_tunnel *tunnel;
+ enum mlx5_tof_rule_type tof_type;
};
/* Flow meter state. */
#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u
+#define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES
+
#define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
struct mlx5_legacy_flow_meter {
/* Must be the first in struct. */
TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
/**< Pointer to the next flow meter structure. */
- uint32_t idx; /* Index to meter object. */
+ uint32_t idx;
+ /* Index to meter object. */
};
#define MLX5_MAX_TUNNELS 256
/* convert jump group to flow table ID in tunnel rules */
struct tunnel_tbl_entry {
- struct mlx5_hlist_entry hash;
+ struct mlx5_list_entry hash;
uint32_t flow_table;
uint32_t tunnel_id;
uint32_t group;
}
/*
 * Tunnel offload is active iff the DV flow engine is compiled in
 * (HAVE_IBV_FLOW_DV_SUPPORT) and dv_miss_info is set in the shared
 * device-context configuration; always false otherwise.
 * NOTE(review): the function's closing #endif is outside this view.
 */
static inline bool
-is_tunnel_offload_active(struct rte_eth_dev *dev)
+is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5_priv *priv = dev->data->dev_private;
- return !!priv->config.dv_miss_info;
+ const struct mlx5_priv *priv = dev->data->dev_private;
+ return !!priv->sh->config.dv_miss_info;
#else
RTE_SET_USED(dev);
return false;
}
/* True when the rule was classified as a tunnel offload match rule. */
static inline bool
-is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,
- __rte_unused const struct rte_flow_attr *attr,
- __rte_unused const struct rte_flow_item items[],
- __rte_unused const struct rte_flow_action actions[])
+is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
{
- return (items[0].type == (typeof(items[0].type))
- MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);
+ return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
}
/* True when the rule was classified as a tunnel offload set (steer) rule. */
static inline bool
-is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,
- __rte_unused const struct rte_flow_attr *attr,
- __rte_unused const struct rte_flow_item items[],
- __rte_unused const struct rte_flow_action actions[])
+is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
{
- return (actions[0].type == (typeof(actions[0].type))
- MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);
+ return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
}
static inline const struct mlx5_flow_tunnel *
/* Flow structure. */
struct rte_flow {
- ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
+ uint32_t type:2;
uint32_t drv_type:2; /**< Driver type. */
uint32_t tunnel:1;
uint32_t meter:24; /**< Holds flow meter id. */
+ uint32_t indirect_type:2; /**< Indirect action type. */
uint32_t rix_mreg_copy;
/**< Index to metadata register copy table resource. */
uint32_t counter; /**< Holds flow counter. */
uint32_t tunnel_id; /**< Tunnel id */
- uint32_t age; /**< Holds ASO age bit index. */
+ union {
+ uint32_t age; /**< Holds ASO age bit index. */
+ uint32_t ct; /**< Holds ASO CT index. */
+ };
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */
} __rte_packed;
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+
+/* HW steering (HWS) flow struct. */
+struct rte_flow_hw {
+ uint32_t idx; /* Flow index from indexed pool. */
+ uint32_t fate_type; /* Fate action type. */
+ /* Fate object; which member is valid is determined by fate_type. */
+ union {
+ /* Jump action. */
+ struct mlx5_hw_jump_action *jump;
+ struct mlx5_hrxq *hrxq; /* TIR action. */
+ };
+ struct rte_flow_template_table *table; /* The table this flow was allocated from. */
+ struct mlx5dr_rule rule; /* HWS layer data struct. */
+} __rte_packed;
+
+/* Data needed to construct one DR action from an rte_flow action. */
+struct mlx5_action_construct_data {
+ LIST_ENTRY(mlx5_action_construct_data) next;
+ /* Action type, kept so that the action types can be checked to match. */
+ int type;
+ uint32_t idx; /* Data index. */
+ uint16_t action_src; /* rte_flow_action src offset. */
+ uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ /* Per-action-type payload -- NOTE(review): member presumably selected by type. */
+ union {
+ struct {
+ /* encap src(item) offset. */
+ uint16_t src;
+ /* encap dst data offset. */
+ uint16_t dst;
+ /* encap data len. */
+ uint16_t len;
+ } encap;
+ struct {
+ uint64_t types; /* RSS hash types. */
+ uint32_t level; /* RSS level. */
+ uint32_t idx; /* Shared action index. */
+ } shared_rss;
+ };
+};
+
+/* Flow pattern (item) template struct. */
+struct rte_flow_pattern_template {
+ LIST_ENTRY(rte_flow_pattern_template) next;
+ /* Template attributes. */
+ struct rte_flow_pattern_template_attr attr;
+ struct mlx5dr_match_template *mt; /* mlx5 match template. */
+ uint64_t item_flags; /* Item layer flags (MLX5_FLOW_LAYER_xxx / MLX5_FLOW_ITEM_xxx bits). */
+ uint32_t refcnt; /* Reference counter. */
+};
+
+/* Flow actions template: cached copies of the actions and their masks. */
+struct rte_flow_actions_template {
+ LIST_ENTRY(rte_flow_actions_template) next;
+ /* Template attributes. */
+ struct rte_flow_actions_template_attr attr;
+ struct rte_flow_action *actions; /* Cached flow actions. */
+ struct rte_flow_action *masks; /* Cached action masks. */
+ uint32_t refcnt; /* Reference counter. */
+};
+
+/* Jump action struct: one DR action per origin (root vs. HWS tables). */
+struct mlx5_hw_jump_action {
+ /* Action jump from root. */
+ struct mlx5dr_action *root_action;
+ /* HW steering jump action. */
+ struct mlx5dr_action *hws_action;
+};
+
+/* Encap/decap action struct. */
+struct mlx5_hw_encap_decap_action {
+ struct mlx5dr_action *action; /* Action object. */
+ size_t data_size; /* Action metadata size. */
+ uint8_t data[]; /* Action data: data_size bytes in this flexible array. */
+};
+
+/* The maximum actions support in the flow. */
+#define MLX5_HW_MAX_ACTS 16
+
+/* DR action set struct. */
+struct mlx5_hw_actions {
+ /* Dynamic action list. */
+ LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
+ struct mlx5_hw_jump_action *jump; /* Jump action. */
+ struct mlx5_hrxq *tir; /* TIR action. */
+ /* Encap/Decap action. */
+ struct mlx5_hw_encap_decap_action *encap_decap;
+ uint16_t encap_decap_pos; /* Encap/Decap action position. */
+ /* NOTE(review): a 4-bit field holds at most 15 while MLX5_HW_MAX_ACTS
+ * is 16 -- confirm a full action set cannot overflow this counter. */
+ uint32_t acts_num:4; /* Total action number. */
+ uint32_t mark:1; /* Indicate the mark action. */
+ /* Translated DR action array from action template. */
+ struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
+};
+
+/* mlx5 action template: an actions template plus its translated actions. */
+struct mlx5_hw_action_template {
+ /* Action template pointer. */
+ struct rte_flow_actions_template *action_template;
+ struct mlx5_hw_actions acts; /* Template actions. */
+};
+
+/* mlx5 flow group struct. */
+struct mlx5_flow_group {
+ struct mlx5_list_entry entry; /* mlx5_list entry. */
+ struct mlx5dr_table *tbl; /* HWS table object. */
+ struct mlx5_hw_jump_action jump; /* Jump action. */
+ enum mlx5dr_table_type type; /* Table type. */
+ uint32_t group_id; /* Group id. */
+ uint32_t idx; /* Group memory index. */
+};
+
+
+/* Maximum item (pattern) templates bindable to one template table. */
+#define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2
+/* Maximum action templates bindable to one template table. */
+#define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32
+
+/* Template table: a group and matcher plus the templates bound to them. */
+struct rte_flow_template_table {
+ LIST_ENTRY(rte_flow_template_table) next;
+ struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
+ struct mlx5dr_matcher *matcher; /* Template matcher. */
+ /* Item templates bind to the table. */
+ struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
+ /* Action templates bind to the table. */
+ struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
+ struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
+ uint32_t type; /* Flow table type RX/TX/FDB. */
+ uint8_t nb_item_templates; /* Item template number. */
+ uint8_t nb_action_templates; /* Action template number. */
+ uint32_t refcnt; /* Table reference counter. */
+};
+
+#endif
+
/*
* Define list of valid combinations of RX Hash fields
* (see enum ibv_rx_hash_fields).
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
#define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
+
+#ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
+#define IBV_RX_HASH_IPSEC_SPI (1U << 8)
+#endif
+
+#define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
+#define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
+ MLX5_RSS_HASH_ESP_SPI)
+#define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
+ MLX5_RSS_HASH_ESP_SPI)
#define MLX5_RSS_HASH_NONE 0ULL
+
+/* extract next protocol type from Ethernet & VLAN headers */
+#define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
+ (_prt) = ((const struct _s *)(_itm)->mask)->_m; \
+ (_prt) &= ((const struct _s *)(_itm)->spec)->_m; \
+ (_prt) = rte_be_to_cpu_16((_prt)); \
+} while (0)
+
/* array of valid combinations of RX Hash fields for RSS */
static const uint64_t mlx5_rss_hash_fields[] = {
MLX5_RSS_HASH_IPV4,
MLX5_RSS_HASH_IPV4_TCP,
MLX5_RSS_HASH_IPV4_UDP,
+ MLX5_RSS_HASH_IPV4_ESP,
MLX5_RSS_HASH_IPV6,
MLX5_RSS_HASH_IPV6_TCP,
MLX5_RSS_HASH_IPV6_UDP,
+ MLX5_RSS_HASH_IPV6_ESP,
+ MLX5_RSS_HASH_ESP_SPI,
MLX5_RSS_HASH_NONE,
};
uint32_t rssq_num; /* Allocated queue num in rss_desc. */
uint32_t flow_idx; /* Intermediate device flow index. */
struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
+ struct mlx5_flow_meter_policy *policy;
+ /* The meter policy used by meter in flow. */
+ struct mlx5_flow_meter_policy *final_policy;
+ /* The final policy when meter policy is hierarchy. */
+ uint32_t skip_matcher_reg:1;
+ /* Indicates if need to skip matcher register in translate. */
+ uint32_t mark:1; /* Indicates if flow contains mark action. */
};
struct mlx5_flow_split_info {
- bool external;
+ uint32_t external:1;
/**< True if flow is created by request external to PMD. */
- uint8_t skip_scale; /**< Skip the scale the table with factor. */
+ uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
+ uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
uint32_t flow_idx; /**< This memory pool index to the flow. */
- uint32_t prefix_mark; /**< Prefix subflow mark flag. */
- uint64_t prefix_layers; /**< Prefix subflow layers. */
uint32_t table_id; /**< Flow table identifier. */
+ uint64_t prefix_layers; /**< Prefix subflow layers. */
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
void *data,
struct rte_flow_error *error);
-typedef struct mlx5_meter_domains_infos *(*mlx5_flow_create_mtr_tbls_t)
- (struct rte_eth_dev *dev);
-typedef int (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
- struct mlx5_meter_domains_infos *tbls);
-typedef int (*mlx5_flow_create_policer_rules_t)
- (struct rte_eth_dev *dev,
- struct mlx5_flow_meter_info *fm,
- const struct rte_flow_attr *attr);
-typedef int (*mlx5_flow_destroy_policer_rules_t)
- (struct rte_eth_dev *dev,
- const struct mlx5_flow_meter_info *fm,
- const struct rte_flow_attr *attr);
+typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm,
+ uint32_t mtr_idx,
+ uint8_t domain_bitmap);
+typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm);
+typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
+typedef struct mlx5_flow_meter_sub_policy *
+ (*mlx5_flow_meter_sub_policy_rss_prepare_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm,
+ int32_t src_port,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error);
+typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
uint32_t cnt,
bool clear, uint64_t *pkts,
- uint64_t *bytes);
+ uint64_t *bytes, void **action);
typedef int (*mlx5_flow_get_aged_flows_t)
(struct rte_eth_dev *dev,
void **context,
(struct rte_eth_dev *dev,
uint32_t domains,
uint32_t flags);
+typedef int (*mlx5_flow_validate_mtr_acts_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
+ bool *is_rss,
+ uint8_t *domain_bitmap,
+ uint8_t *policy_mode,
+ struct rte_mtr_error *error);
+typedef int (*mlx5_flow_create_mtr_acts_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error);
+typedef void (*mlx5_flow_destroy_mtr_acts_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+typedef int (*mlx5_flow_create_policy_rules_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+typedef void (*mlx5_flow_destroy_policy_rules_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+typedef int (*mlx5_flow_create_def_policy_t)
+ (struct rte_eth_dev *dev);
+typedef void (*mlx5_flow_destroy_def_policy_t)
+ (struct rte_eth_dev *dev);
+typedef int (*mlx5_flow_discover_priorities_t)
+ (struct rte_eth_dev *dev,
+ const uint16_t *vprio, int vprio_n);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_info_get_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_port_configure_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
+typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_pattern_template_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *template,
+ struct rte_flow_error *error);
+typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_actions_template_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_actions_template *template,
+ struct rte_flow_error *error);
+typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_template_table_attr *attr,
+ struct rte_flow_pattern_template *item_templates[],
+ uint8_t nb_item_templates,
+ struct rte_flow_actions_template *action_templates[],
+ uint8_t nb_action_templates,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_table_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_template_table *table,
+ struct rte_flow_error *error);
+typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ const struct rte_flow_item items[],
+ uint8_t pattern_template_index,
+ const struct rte_flow_action actions[],
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_async_flow_destroy_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow *flow,
+ void *user_data,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_pull_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ struct rte_flow_op_result res[],
+ uint16_t n_res,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_push_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ struct rte_flow_error *error);
+
+typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ void *user_data,
+ struct rte_flow_error *error);
+
+typedef int (*mlx5_flow_async_action_handle_update_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_handle *handle,
+ const void *update,
+ void *user_data,
+ struct rte_flow_error *error);
+
+typedef int (*mlx5_flow_async_action_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_handle *handle,
+ void *user_data,
+ struct rte_flow_error *error);
/*
 * Per-driver flow operations vtable; each flow engine (Verbs, DV,
 * HW steering) provides its own instance.
 * NOTE(review): some members are elided from this diff view.
 */
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_query_t query;
mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
- mlx5_flow_create_policer_rules_t prepare_policer_rules;
- mlx5_flow_destroy_policer_rules_t destroy_policer_rules;
+ mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
mlx5_flow_mtr_alloc_t create_meter;
mlx5_flow_mtr_free_t free_meter;
+ mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
+ mlx5_flow_create_mtr_acts_t create_mtr_acts;
+ mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
+ mlx5_flow_create_policy_rules_t create_policy_rules;
+ mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
+ mlx5_flow_create_def_policy_t create_def_policy;
+ mlx5_flow_destroy_def_policy_t destroy_def_policy;
+ mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
+ mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
+ mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
mlx5_flow_counter_alloc_t counter_alloc;
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_sync_domain_t sync_domain;
+ mlx5_flow_discover_priorities_t discover_priorities;
+ mlx5_flow_item_create_t item_create;
+ mlx5_flow_item_release_t item_release;
+ mlx5_flow_item_update_t item_update;
+ mlx5_flow_info_get_t info_get;
+ mlx5_flow_port_configure_t configure;
+ mlx5_flow_pattern_template_create_t pattern_template_create;
+ mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
+ mlx5_flow_actions_template_create_t actions_template_create;
+ mlx5_flow_actions_template_destroy_t actions_template_destroy;
+ mlx5_flow_table_create_t template_table_create;
+ mlx5_flow_table_destroy_t template_table_destroy;
+ mlx5_flow_async_flow_create_t async_flow_create;
+ mlx5_flow_async_flow_destroy_t async_flow_destroy;
+ mlx5_flow_pull_t pull;
+ mlx5_flow_push_t push;
+ mlx5_flow_async_action_handle_create_t async_action_create;
+ mlx5_flow_async_action_handle_update_t async_action_update;
+ mlx5_flow_async_action_handle_destroy_t async_action_destroy;
};
/* mlx5_flow.c */
static inline bool
tunnel_use_standard_attr_group_translate
- (struct rte_eth_dev *dev,
- const struct mlx5_flow_tunnel *tunnel,
+ (const struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[])
+ const struct mlx5_flow_tunnel *tunnel,
+ enum mlx5_tof_rule_type tof_rule_type)
{
bool verdict;
* method
*/
verdict = !attr->group &&
- is_flow_tunnel_steer_rule(dev, attr, items, actions);
+ is_flow_tunnel_steer_rule(tof_rule_type);
} else {
/*
* non-tunnel group translation uses standard method for
mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
{
struct mlx5_aso_mtr_pool *pool;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
/* Decrease to original index. */
idx--;
- MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < mtrmng->n);
- pool = mtrmng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
+ MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
+ /*
+ * Take the read lock so that a concurrent resize of the pools[]
+ * array cannot free/replace the storage while it is dereferenced.
+ */
+ rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
+ pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
+ rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}
+/* Walk a pattern array and return its terminating END item. */
+static __rte_always_inline const struct rte_flow_item *
+mlx5_find_end_item(const struct rte_flow_item *item)
+{
+ while (item->type != RTE_FLOW_ITEM_TYPE_END)
+ item++;
+ return item;
+}
+
+/*
+ * Check that an integrity item requests only the supported checks.
+ *
+ * A local copy of the item has the four supported flag bits cleared;
+ * the aggregate .value must then be zero, otherwise the application
+ * asked for an integrity check this driver does not implement.
+ * (Relies on .value overlaying the flag bits in
+ * struct rte_flow_item_integrity.)
+ *
+ * @param[in] item
+ *   Integrity item specification to validate.
+ *
+ * @return
+ *   True if only supported integrity bits are requested.
+ */
+static __rte_always_inline bool
+mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
+{
+ struct rte_flow_item_integrity test = *item;
+ test.l3_ok = 0;
+ test.l4_ok = 0;
+ test.ipv4_csum_ok = 0;
+ test.l4_csum_ok = 0;
+ return (test.value == 0);
+}
+
+/*
+ * Get ASO CT action by device and index.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   Index to the ASO CT action (1-based; 0 is reserved as "invalid").
+ *
+ * @return
+ *   The specified ASO CT action pointer.
+ */
+static inline struct mlx5_aso_ct_action *
+flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+ struct mlx5_aso_ct_pool *pool;
+
+ /* Decrease to the original 0-based index. */
+ idx--;
+ MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
+ /*
+ * The division could become a bitwise AND if the pool size is a
+ * power of two. The read lock protects pools[] against a
+ * concurrent resize while the entry is dereferenced.
+ */
+ rte_rwlock_read_lock(&mng->resize_rwl);
+ pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
+ rte_rwlock_read_unlock(&mng->resize_rwl);
+ return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
+}
+
+/*
+ * Get ASO CT action by owner & index.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] own_idx
+ *   Combination of the owner port id and the 1-based action index,
+ *   as generated by MLX5_INDIRECT_ACT_CT_GEN_IDX().
+ *
+ * @return
+ *   The specified ASO CT action pointer, or NULL when the action is
+ *   owned by another port and is not shared with (peer of) this one,
+ *   or when this device is not started yet.
+ */
+static inline struct mlx5_aso_ct_action *
+flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_action *ct;
+ uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
+ uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
+
+ if (owner == PORT_ID(priv)) {
+ /* Owned by this port: plain per-device lookup. */
+ ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+ } else {
+ struct rte_eth_dev *owndev;
+
+ /* Validate the owner port before indexing the device array. */
+ MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
+ if (dev->data->dev_started != 1)
+ return NULL;
+ owndev = &rte_eth_devices[owner];
+ ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
+ /* Only the configured peer port may use this action. */
+ if (ct->peer != PORT_ID(priv))
+ return NULL;
+ }
+ return ct;
+}
+
+/*
+ * Translate inner-layer pattern flags into the Ethernet type of the
+ * tunneled payload. Flags are checked in priority order: an inner L2
+ * header dominates, then inner IPv4/IPv6, then MPLS.
+ *
+ * @return
+ *   The matching RTE_ETHER_TYPE_* value, or 0 when no inner layer
+ *   flag is present.
+ */
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+ uint16_t etype = 0;
+
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ etype = RTE_ETHER_TYPE_TEB;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ etype = RTE_ETHER_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ etype = RTE_ETHER_TYPE_IPV6;
+ else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+ etype = RTE_ETHER_TYPE_MPLS;
+ return etype;
+}
+
+int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr);
uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- uint32_t subpriority);
+ const struct rte_flow_attr *attr,
+ uint32_t subpriority, bool external);
int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
enum mlx5_feature_name feature,
uint32_t id,
uint64_t item_flags,
const struct rte_flow_item *gre_item,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *gre_item,
+ struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
uint64_t last_item,
uint8_t target_protocol,
const struct rte_flow_item_tcp *flow_mask,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
uint64_t item_flags,
struct rte_eth_dev *dev,
struct rte_flow_error *error);
-int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+ uint16_t udp_dport,
+ const struct rte_flow_item *item,
uint64_t item_flags,
+ const struct rte_flow_attr *attr,
struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
uint64_t item_flags,
uint16_t ether_type,
const struct rte_flow_item_ecpri *acc_mask,
struct rte_flow_error *error);
-struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
- (struct rte_eth_dev *dev);
-int mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
- struct mlx5_meter_domains_infos *tbl);
-int mlx5_flow_prepare_policer_rules(struct rte_eth_dev *dev,
- struct mlx5_flow_meter_info *fm,
- const struct rte_flow_attr *attr);
-int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
- struct mlx5_flow_meter_info *fm,
- const struct rte_flow_attr *attr);
-int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
- struct rte_mtr_error *error);
+int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm,
+ uint32_t mtr_idx,
+ uint8_t domain_bitmap);
+void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm);
+void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
+struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
+int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
+int mlx5_action_handle_attach(struct rte_eth_dev *dev);
+int mlx5_action_handle_detach(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
-/* Hash list callbacks for flow tables: */
-struct mlx5_hlist_entry *flow_dv_tbl_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *entry_ctx);
-int flow_dv_tbl_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry, uint64_t key,
+struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
+int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
+void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *entry_ctx);
+void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
uint32_t table_level, uint8_t egress, uint8_t transfer,
bool external, const struct mlx5_flow_tunnel *tunnel,
uint32_t group_id, uint8_t dummy,
uint32_t table_id, struct rte_flow_error *error);
-struct mlx5_hlist_entry *flow_dv_tag_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *cb_ctx);
-int flow_dv_tag_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry, uint64_t key,
+struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
+int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_tag_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-int flow_dv_modify_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx);
-struct mlx5_hlist_entry *flow_dv_modify_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *ctx);
-void flow_dv_modify_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *ctx);
-int flow_dv_mreg_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry, uint64_t key,
+void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *cb_ctx);
+void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+
+int flow_dv_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_modify_create_cb(void *tool_ctx, void *ctx);
+void flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_modify_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *ctx);
+void flow_dv_modify_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
+int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_mreg_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-int flow_dv_encap_decap_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx);
-struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *cb_ctx);
-void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-int flow_dv_matcher_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *ctx);
-struct mlx5_cache_entry *flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *ctx);
-void flow_dv_matcher_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_port_id_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_push_vlan_create_cb
- (struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_sample_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_sample_create_cb
- (struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_dest_array_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_dest_array_create_cb
- (struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
+void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry,
+ void *ctx);
+void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+
+int flow_dv_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_encap_decap_create_cb(void *tool_ctx,
+ void *cb_ctx);
+void flow_dv_encap_decap_remove_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_encap_decap_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx);
+void flow_dv_encap_decap_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *ctx);
+struct mlx5_list_entry *flow_dv_matcher_create_cb(void *tool_ctx, void *ctx);
+void flow_dv_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+
+int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
+void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_dv_port_id_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
+ void *cb_ctx);
+void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
+void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_dv_sample_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
+ void *cb_ctx);
+void flow_dv_dest_array_remove_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+void flow_dv_hashfields_set(uint64_t item_flags,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint64_t *hash_fields);
+void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+ uint64_t *hash_field);
+uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
+ const uint64_t hash_fields);
+
+struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
+void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+int flow_hw_grp_match_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *cb_ctx);
+void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+
struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
uint32_t age_idx);
int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
void mlx5_flow_os_release_workspace(void);
uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
+int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
+ bool *is_rss,
+ uint8_t *domain_bitmap,
+ uint8_t *policy_mode,
+ struct rte_mtr_error *error);
+void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error);
+int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
+void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
+void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *dev_handle);
+const struct mlx5_flow_tunnel *
+mlx5_get_tof(const struct rte_flow_item *items,
+ const struct rte_flow_action *actions,
+ enum mlx5_tof_rule_type *rule_type);
+void
+flow_hw_resource_release(struct rte_eth_dev *dev);
+int flow_dv_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err);
+struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err);
+int flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_handle *handle,
+ struct rte_flow_error *error);
+int flow_dv_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_action_handle *handle,
+ const void *update,
+ struct rte_flow_error *err);
+int flow_dv_action_query(struct rte_eth_dev *dev,
+ const struct rte_flow_action_handle *handle,
+ void *data,
+ struct rte_flow_error *error);
+size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
+int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
+ size_t *size, struct rte_flow_error *error);
#endif /* RTE_PMD_MLX5_FLOW_H_ */