const struct rte_flow_attr *attr,
const struct rte_flow_action *app_actions,
uint32_t flow_idx,
+ const struct mlx5_flow_tunnel *tunnel,
struct tunnel_default_miss_ctx *ctx,
struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
return ret;
}
-static struct mlx5_flow_tunnel *
-flow_tunnel_from_rule(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[])
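+/* Return the tunnel a device flow was created for, NULL for non-tunnel flows. */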
+static inline struct mlx5_flow_tunnel *
+flow_tunnel_from_rule(const struct mlx5_flow *flow)
{
struct mlx5_flow_tunnel *tunnel;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
- if (is_flow_tunnel_match_rule(dev, attr, items, actions))
- tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
- else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
- tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
- else
- tunnel = NULL;
+ tunnel = (typeof(tunnel))flow->tunnel;
#pragma GCC diagnostic pop
return tunnel;
error);
if (ret < 0)
goto error;
- if (is_flow_tunnel_steer_rule(dev, attr,
- buf->entry[i].pattern,
- p_actions_rx)) {
+ if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
ret = flow_tunnel_add_default_miss(dev, flow, attr,
p_actions_rx,
idx,
+ wks->flows[0].tunnel,
&default_miss_ctx,
error);
if (ret < 0) {
}
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
- tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
+ tunnel = flow_tunnel_from_rule(wks->flows);
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
return ret;
}
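+/**
+ * Detect the tunnel offload rule type from flow pattern items and actions.
+ *
+ * @param item
+ *   Pattern to scan for the tunnel offload match item.
+ * @param action
+ *   Actions to scan for the tunnel offload set action.
+ * @param[out] rule_type
+ *   Rule type; set only when a tunnel offload element is found.
+ *
+ * @return
+ *   The referenced tunnel object, or NULL for a non tunnel offload rule.
+ */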
+const struct mlx5_flow_tunnel *
+mlx5_get_tof(const struct rte_flow_item *item,
+ const struct rte_flow_action *action,
+ enum mlx5_tof_rule_type *rule_type)
+{
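+ /* Scan the pattern for the tunnel offload match item. */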
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == (typeof(item->type))
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+ *rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
+ return flow_items_to_tunnel(item);
+ }
+ }
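+ /* Scan the actions for the tunnel offload set action. */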
+ for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ if (action->type == (typeof(action->type))
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+ *rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
+ return flow_actions_to_tunnel(action);
+ }
+ }
+ return NULL;
+}
+
/**
* Tunnel offload functionality is defined for DV environment only.
*/
const struct rte_flow_attr *attr,
const struct rte_flow_action *app_actions,
uint32_t flow_idx,
+ const struct mlx5_flow_tunnel *tunnel,
struct tunnel_default_miss_ctx *ctx,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow *dev_flow;
struct rte_flow_attr miss_attr = *attr;
- const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
const struct rte_flow_item miss_items[2] = {
{
.type = RTE_FLOW_ITEM_TYPE_ETH,
dev_flow->flow = flow;
dev_flow->external = true;
dev_flow->tunnel = tunnel;
+ dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
/* Subflow object was created, we must include one in the list. */
SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
dev_flow->handle, next);
__rte_unused const struct rte_flow_attr *attr,
__rte_unused const struct rte_flow_action *actions,
__rte_unused uint32_t flow_idx,
+ __rte_unused const struct mlx5_flow_tunnel *tunnel,
__rte_unused struct tunnel_default_miss_ctx *ctx,
__rte_unused struct rte_flow_error *error)
{
/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 32
+/**
+ * Tunnel offload rule types.
+ */
+enum mlx5_tof_rule_type {
+ MLX5_TUNNEL_OFFLOAD_NONE = 0, /* Not a tunnel offload rule. */
+ MLX5_TUNNEL_OFFLOAD_SET_RULE, /* Tunnel set (steer) rule. */
+ MLX5_TUNNEL_OFFLOAD_MATCH_RULE, /* Tunnel match rule. */
+ MLX5_TUNNEL_OFFLOAD_MISS_RULE, /* Tunnel default miss rule. */
+};
+
/** Device flow structure. */
__extension__
struct mlx5_flow {
struct mlx5_flow_handle *handle;
uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
const struct mlx5_flow_tunnel *tunnel;
+ enum mlx5_tof_rule_type tof_type; /* Tunnel offload rule type. */
};
/* Flow meter state. */
}
static inline bool
-is_tunnel_offload_active(struct rte_eth_dev *dev)
+is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_priv *priv = dev->data->dev_private;
return !!priv->config.dv_miss_info;
#else
RTE_SET_USED(dev);
}
static inline bool
-is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,
- __rte_unused const struct rte_flow_attr *attr,
- __rte_unused const struct rte_flow_item items[],
- __rte_unused const struct rte_flow_action actions[])
+is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
{
- return (items[0].type == (typeof(items[0].type))
- MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);
+ return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
}
static inline bool
-is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,
- __rte_unused const struct rte_flow_attr *attr,
- __rte_unused const struct rte_flow_item items[],
- __rte_unused const struct rte_flow_action actions[])
+is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
{
- return (actions[0].type == (typeof(actions[0].type))
- MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);
+ return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
}
static inline const struct mlx5_flow_tunnel *
static inline bool
tunnel_use_standard_attr_group_translate
- (struct rte_eth_dev *dev,
- const struct mlx5_flow_tunnel *tunnel,
+ (const struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[])
+ const struct mlx5_flow_tunnel *tunnel,
+ enum mlx5_tof_rule_type tof_rule_type)
{
bool verdict;
* method
*/
verdict = !attr->group &&
- is_flow_tunnel_steer_rule(dev, attr, items, actions);
+ is_flow_tunnel_steer_rule(tof_rule_type);
} else {
/*
* non-tunnel group translation uses standard method for
void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle);
+const struct mlx5_flow_tunnel *
+mlx5_get_tof(const struct rte_flow_item *item,
+ const struct rte_flow_action *action,
+ enum mlx5_tof_rule_type *rule_type);
+
#endif /* RTE_PMD_MLX5_FLOW_H_ */
uint32_t rw_act_num = 0;
uint64_t is_root;
const struct mlx5_flow_tunnel *tunnel;
+ enum mlx5_tof_rule_type tof_rule_type = MLX5_TUNNEL_OFFLOAD_NONE;
struct flow_grp_info grp_info = {
.external = !!external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
+ .std_tbl_fix = true,
};
const struct rte_eth_hairpin_conf *conf;
const struct rte_flow_item *rule_items = items;
if (items == NULL)
return -1;
- if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
- tunnel = flow_items_to_tunnel(items);
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
- MLX5_FLOW_ACTION_DECAP;
- } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
- tunnel = flow_actions_to_tunnel(actions);
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
- } else {
- tunnel = NULL;
+ tunnel = is_tunnel_offload_active(dev) ?
+ mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
+ if (tunnel) {
+ if (priv->representor)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "decap not supported for VF representor");
+ if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
+ MLX5_FLOW_ACTION_DECAP;
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, attr, tunnel, tof_rule_type);
}
- if (tunnel && priv->representor)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "decap not supported "
- "for VF representor");
- grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
- (dev, tunnel, attr, items, actions);
ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
if (ret < 0)
return ret;
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (type) {
- case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
- if (items[0].type != (typeof(items[0].type))
- MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL, "MLX5 private items "
- "must be the first");
- break;
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
if (ret < 0)
return ret;
break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
+ /* The tunnel offload item was processed before;
+  * list it here as a supported type.
+  */
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
++actions_n;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
- if (actions[0].type != (typeof(actions[0].type))
- MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "MLX5 private action "
- "must be the first");
-
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
- break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_dv_validate_action_modify_field(dev,
action_flags,
return ret;
action_flags |= MLX5_FLOW_ACTION_CT;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ /* The tunnel offload action was processed before;
+  * list it here as a supported type.
+  */
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
int tmp_actions_n = 0;
uint32_t table;
int ret = 0;
- const struct mlx5_flow_tunnel *tunnel;
+ const struct mlx5_flow_tunnel *tunnel = NULL;
struct flow_grp_info grp_info = {
.external = !!dev_flow->external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
.skip_scale = dev_flow->skip_scale &
(1 << MLX5_SCALE_FLOW_GROUP_BIT),
+ .std_tbl_fix = true,
};
const struct rte_flow_item *head_item = items;
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
/* update normal path action resource into last index of array */
sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
- tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
- flow_items_to_tunnel(items) :
- is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
- flow_actions_to_tunnel(actions) :
- dev_flow->tunnel ? dev_flow->tunnel : NULL;
+ if (is_tunnel_offload_active(dev)) {
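+ /* A default miss subflow inherits the tunnel from its parent flow;
+  * otherwise detect the tunnel offload type from items and actions.
+  */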
+ if (dev_flow->tunnel) {
+ RTE_VERIFY(dev_flow->tof_type ==
+ MLX5_TUNNEL_OFFLOAD_MISS_RULE);
+ tunnel = dev_flow->tunnel;
+ } else {
+ tunnel = mlx5_get_tof(items, actions,
+ &dev_flow->tof_type);
+ dev_flow->tunnel = tunnel;
+ }
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, attr, tunnel, dev_flow->tof_type);
+ }
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
- grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
- (dev, tunnel, attr, items, actions);
ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
&grp_info, error);
if (ret)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
/* number of actions must be set to 0 in case of dirty stack. */
mhdr_res->actions_num = 0;
- if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
/*
* do not add decap action if match rule drops packet
* HW rejects rules with decap & drop