MLX5_RTE_FLOW_ACTION_TYPE_AGE,
MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
+ MLX5_RTE_FLOW_ACTION_TYPE_RSS,
};
#define MLX5_INDIRECT_ACTION_TYPE_OFFSET 30
MLX5_MTR_ID,
MLX5_ASO_FLOW_HIT,
MLX5_ASO_CONNTRACK,
+ MLX5_SAMPLE_ID,
};
/* Default queue number. */
#define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
#define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
-/* INTEGRITY item bit */
-#define MLX5_FLOW_ITEM_INTEGRITY (UINT64_C(1) << 34)
+/* INTEGRITY item bits */
+#define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
+#define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
+#define MLX5_FLOW_ITEM_INTEGRITY \
+ (MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
/* Conntrack item. */
-#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 35)
+#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
+
+/* Flex item */
+#define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
+#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
+
+/* ESP item */
+#define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
- MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
#define MLX5_ACT_NUM_MDF_IPV6 4
#define MLX5_ACT_NUM_MDF_MAC 2
#define MLX5_ACT_NUM_MDF_VID 1
-#define MLX5_ACT_NUM_MDF_PORT 2
+#define MLX5_ACT_NUM_MDF_PORT 1
#define MLX5_ACT_NUM_MDF_TTL 1
#define MLX5_ACT_NUM_DEC_TTL MLX5_ACT_NUM_MDF_TTL
#define MLX5_ACT_NUM_MDF_TCPSEQ 1
MLX5_FLOW_TYPE_MIN,
MLX5_FLOW_TYPE_DV,
MLX5_FLOW_TYPE_VERBS,
+ MLX5_FLOW_TYPE_HW,
MLX5_FLOW_TYPE_MAX,
};
const struct mlx5_flow_tunnel *tunnel;
uint32_t group_id;
uint32_t external:1;
- uint32_t tunnel_offload:1; /* Tunnel offlod table or not. */
+ uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
uint32_t is_egress:1; /**< Egress table. */
uint32_t is_transfer:1; /**< Transfer table. */
uint32_t dummy:1; /**< DR table. */
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
void *drv_flow; /**< pointer to driver flow object. */
uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
- uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
- uint32_t mark:1; /**< Metadate rxq mark flag. */
+ uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
uint32_t fate_action:3; /**< Fate action type. */
union {
uint32_t rix_hrxq; /**< Hash Rx queue object index. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_flow_handle_dv dvh;
#endif
+ uint8_t flex_item; /**< referenced Flex Item bitmask. */
} __rte_packed;
/*
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
const struct mlx5_priv *priv = dev->data->dev_private;
- return !!priv->config.dv_miss_info;
+ return !!priv->sh->config.dv_miss_info;
#else
RTE_SET_USED(dev);
return false;
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */
} __rte_packed;
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+
+/* HWS flow struct. */
+struct rte_flow_hw {
+ uint32_t idx; /* Flow index from indexed pool. */
+ uint32_t fate_type; /* Fate action type. */
+ union {
+ /* Jump action. */
+ struct mlx5_hw_jump_action *jump;
+ struct mlx5_hrxq *hrxq; /* TIR action. */
+ };
+ struct rte_flow_template_table *table; /* The table flow allocated from. */
+ struct mlx5dr_rule rule; /* HWS layer data struct. */
+} __rte_packed;
+
+/* rte flow action translate to DR action struct. */
+struct mlx5_action_construct_data {
+ LIST_ENTRY(mlx5_action_construct_data) next;
+ /* Ensure the action types are matched. */
+ int type;
+ uint32_t idx; /* Data index. */
+ uint16_t action_src; /* rte_flow_action src offset. */
+ uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ union {
+ struct {
+ /* encap src(item) offset. */
+ uint16_t src;
+ /* encap dst data offset. */
+ uint16_t dst;
+ /* encap data len. */
+ uint16_t len;
+ } encap;
+ struct {
+ uint64_t types; /* RSS hash types. */
+ uint32_t level; /* RSS level. */
+ uint32_t idx; /* Shared action index. */
+ } shared_rss;
+ };
+};
+
+/* Flow item template struct. */
+struct rte_flow_pattern_template {
+ LIST_ENTRY(rte_flow_pattern_template) next;
+ /* Template attributes. */
+ struct rte_flow_pattern_template_attr attr;
+ struct mlx5dr_match_template *mt; /* mlx5 match template. */
+ uint64_t item_flags; /* Item layer flags. */
+ uint32_t refcnt; /* Reference counter. */
+};
+
+/* Flow action template struct. */
+struct rte_flow_actions_template {
+ LIST_ENTRY(rte_flow_actions_template) next;
+ /* Template attributes. */
+ struct rte_flow_actions_template_attr attr;
+ struct rte_flow_action *actions; /* Cached flow actions. */
+ struct rte_flow_action *masks; /* Cached action masks. */
+ uint32_t refcnt; /* Reference counter. */
+};
+
+/* Jump action struct. */
+struct mlx5_hw_jump_action {
+ /* Action jump from root. */
+ struct mlx5dr_action *root_action;
+ /* HW steering jump action. */
+ struct mlx5dr_action *hws_action;
+};
+
+/* Encap decap action struct. */
+struct mlx5_hw_encap_decap_action {
+ struct mlx5dr_action *action; /* Action object. */
+ size_t data_size; /* Action metadata size. */
+ uint8_t data[]; /* Action data. */
+};
+
+/* The maximum number of actions supported in the flow. */
+#define MLX5_HW_MAX_ACTS 16
+
+/* DR action set struct. */
+struct mlx5_hw_actions {
+ /* Dynamic action list. */
+ LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
+ struct mlx5_hw_jump_action *jump; /* Jump action. */
+ struct mlx5_hrxq *tir; /* TIR action. */
+ /* Encap/Decap action. */
+ struct mlx5_hw_encap_decap_action *encap_decap;
+ uint16_t encap_decap_pos; /* Encap/Decap action position. */
+ uint32_t acts_num:4; /* Total action number. */
+ uint32_t mark:1; /* Indicate the mark action. */
+ /* Translated DR action array from action template. */
+ struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
+};
+
+/* mlx5 action template struct. */
+struct mlx5_hw_action_template {
+ /* Action template pointer. */
+ struct rte_flow_actions_template *action_template;
+ struct mlx5_hw_actions acts; /* Template actions. */
+};
+
+/* mlx5 flow group struct. */
+struct mlx5_flow_group {
+ struct mlx5_list_entry entry;
+ struct mlx5dr_table *tbl; /* HWS table object. */
+ struct mlx5_hw_jump_action jump; /* Jump action. */
+ enum mlx5dr_table_type type; /* Table type. */
+ uint32_t group_id; /* Group id. */
+ uint32_t idx; /* Group memory index. */
+};
+
+
+#define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2
+#define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32
+
+struct rte_flow_template_table {
+ LIST_ENTRY(rte_flow_template_table) next;
+ struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
+ struct mlx5dr_matcher *matcher; /* Template matcher. */
+ /* Item templates bind to the table. */
+ struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
+ /* Action templates bind to the table. */
+ struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
+ struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
+ uint32_t type; /* Flow table type RX/TX/FDB. */
+ uint8_t nb_item_templates; /* Item template number. */
+ uint8_t nb_action_templates; /* Action template number. */
+ uint32_t refcnt; /* Table reference counter. */
+};
+
+#endif
+
/*
* Define list of valid combinations of RX Hash fields
* (see enum ibv_rx_hash_fields).
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
#define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
+
+#ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
+#define IBV_RX_HASH_IPSEC_SPI (1U << 8)
+#endif
+
+#define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
+#define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
+ MLX5_RSS_HASH_ESP_SPI)
+#define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
+ MLX5_RSS_HASH_ESP_SPI)
#define MLX5_RSS_HASH_NONE 0ULL
MLX5_RSS_HASH_IPV4,
MLX5_RSS_HASH_IPV4_TCP,
MLX5_RSS_HASH_IPV4_UDP,
+ MLX5_RSS_HASH_IPV4_ESP,
MLX5_RSS_HASH_IPV6,
MLX5_RSS_HASH_IPV6_TCP,
MLX5_RSS_HASH_IPV6_UDP,
+ MLX5_RSS_HASH_IPV6_ESP,
+ MLX5_RSS_HASH_ESP_SPI,
MLX5_RSS_HASH_NONE,
};
/* The final policy when meter policy is hierarchy. */
uint32_t skip_matcher_reg:1;
/* Indicates if need to skip matcher register in translate. */
+ uint32_t mark:1; /* Indicates if flow contains mark action. */
};
struct mlx5_flow_split_info {
- bool external;
+ uint32_t external:1;
/**< True if flow is created by request external to PMD. */
- uint8_t skip_scale; /**< Skip the scale the table with factor. */
+ uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
+ uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
uint32_t flow_idx; /**< This memory pool index to the flow. */
- uint32_t prefix_mark; /**< Prefix subflow mark flag. */
- uint64_t prefix_layers; /**< Prefix subflow layers. */
uint32_t table_id; /**< Flow table identifier. */
+ uint64_t prefix_layers; /**< Prefix subflow layers. */
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
uint32_t cnt,
bool clear, uint64_t *pkts,
- uint64_t *bytes);
+ uint64_t *bytes, void **action);
typedef int (*mlx5_flow_get_aged_flows_t)
(struct rte_eth_dev *dev,
void **context,
(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_destroy_def_policy_t)
(struct rte_eth_dev *dev);
+typedef int (*mlx5_flow_discover_priorities_t)
+ (struct rte_eth_dev *dev,
+ const uint16_t *vprio, int vprio_n);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_info_get_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_port_configure_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
+typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_pattern_template_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *template,
+ struct rte_flow_error *error);
+typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_actions_template_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_actions_template *template,
+ struct rte_flow_error *error);
+typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_template_table_attr *attr,
+ struct rte_flow_pattern_template *item_templates[],
+ uint8_t nb_item_templates,
+ struct rte_flow_actions_template *action_templates[],
+ uint8_t nb_action_templates,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_table_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_template_table *table,
+ struct rte_flow_error *error);
+typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ const struct rte_flow_item items[],
+ uint8_t pattern_template_index,
+ const struct rte_flow_action actions[],
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_async_flow_destroy_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow *flow,
+ void *user_data,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_pull_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ struct rte_flow_op_result res[],
+ uint16_t n_res,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_push_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ struct rte_flow_error *error);
+
+typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ void *user_data,
+ struct rte_flow_error *error);
+
+typedef int (*mlx5_flow_async_action_handle_update_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_handle *handle,
+ const void *update,
+ void *user_data,
+ struct rte_flow_error *error);
+
+typedef int (*mlx5_flow_async_action_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_handle *handle,
+ void *user_data,
+ struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_sync_domain_t sync_domain;
+ mlx5_flow_discover_priorities_t discover_priorities;
+ mlx5_flow_item_create_t item_create;
+ mlx5_flow_item_release_t item_release;
+ mlx5_flow_item_update_t item_update;
+ mlx5_flow_info_get_t info_get;
+ mlx5_flow_port_configure_t configure;
+ mlx5_flow_pattern_template_create_t pattern_template_create;
+ mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
+ mlx5_flow_actions_template_create_t actions_template_create;
+ mlx5_flow_actions_template_destroy_t actions_template_destroy;
+ mlx5_flow_table_create_t template_table_create;
+ mlx5_flow_table_destroy_t template_table_destroy;
+ mlx5_flow_async_flow_create_t async_flow_create;
+ mlx5_flow_async_flow_destroy_t async_flow_destroy;
+ mlx5_flow_pull_t pull;
+ mlx5_flow_push_t push;
+ mlx5_flow_async_action_handle_create_t async_action_create;
+ mlx5_flow_async_action_handle_update_t async_action_update;
+ mlx5_flow_async_action_handle_destroy_t async_action_destroy;
};
/* mlx5_flow.c */
/* Decrease to original index. */
idx--;
MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
+ rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
+ rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}
return ct;
}
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ return RTE_ETHER_TYPE_TEB;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ return RTE_ETHER_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ return RTE_ETHER_TYPE_IPV6;
+ else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+ return RTE_ETHER_TYPE_MPLS;
+ return 0;
+}
+
+int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
uint64_t item_flags,
const struct rte_flow_item *gre_item,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *gre_item,
+ struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
uint64_t last_item,
uint8_t target_protocol,
const struct rte_flow_item_tcp *flow_mask,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
+int mlx5_action_handle_attach(struct rte_eth_dev *dev);
+int mlx5_action_handle_detach(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
struct mlx5_list_entry *entry, void *cb_ctx);
void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
struct mlx5_list_entry *entry);
-int flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
- void **action, struct rte_flow_error *error);
-int
-flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
- struct rte_flow_error *error);
+void flow_dv_hashfields_set(uint64_t item_flags,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint64_t *hash_fields);
+void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+ uint64_t *hash_field);
+uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
+ const uint64_t hash_fields);
+
+struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
+void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+int flow_hw_grp_match_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *cb_ctx);
+void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
uint32_t age_idx);
mlx5_get_tof(const struct rte_flow_item *items,
const struct rte_flow_action *actions,
enum mlx5_tof_rule_type *rule_type);
-
-
+void
+flow_hw_resource_release(struct rte_eth_dev *dev);
+int flow_dv_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err);
+struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err);
+int flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_handle *handle,
+ struct rte_flow_error *error);
+int flow_dv_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_action_handle *handle,
+ const void *update,
+ struct rte_flow_error *err);
+int flow_dv_action_query(struct rte_eth_dev *dev,
+ const struct rte_flow_action_handle *handle,
+ void *data,
+ struct rte_flow_error *error);
+size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
+int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
+ size_t *size, struct rte_flow_error *error);
#endif /* RTE_PMD_MLX5_FLOW_H_ */