target_group =
((const struct rte_flow_action_jump *)action->conf)->group;
ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
- grp_info, error);
+ &grp_info, error);
if (ret)
return ret;
if (attributes->group == target_group &&
flow_dv_validate_attributes(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
const struct rte_flow_attr *attributes,
- struct flow_grp_info grp_info,
+ const struct flow_grp_info *grp_info,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
}
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, tunnel, attr, items, actions);
- ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
+ ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
if (ret < 0)
return ret;
is_root = (uint64_t)ret;
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] attr
+ * Flow attributes.
*
* @return
* 0 on success, a negative errno value otherwise.
*/
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
- void *key, const struct rte_flow_item *item)
+ void *key, const struct rte_flow_item *item,
+ const struct rte_flow_attr *attr)
{
const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
priv = mlx5_port_to_eswitch_info(id, item == NULL);
if (!priv)
return -rte_errno;
- /* Translate to vport field or to metadata, depending on mode. */
- if (priv->vport_meta_mask)
- flow_dv_translate_item_meta_vport(matcher, key,
- priv->vport_meta_tag,
- priv->vport_meta_mask);
- else
+ /*
+ * Translate to vport field or to metadata, depending on mode.
+ * Kernel can use either misc.source_port or half of C0 metadata
+ * register.
+ */
+ if (priv->vport_meta_mask) {
+ /*
+ * Provide the hint for SW steering library
+ * to insert the flow into ingress domain and
+ * save the extra vport match.
+ */
+ if (mask == 0xffff && priv->vport_id == 0xffff &&
+ priv->pf_bond < 0 && attr->transfer)
+ flow_dv_translate_item_source_vport
+ (matcher, key, priv->vport_id, mask);
+ else
+ flow_dv_translate_item_meta_vport
+ (matcher, key,
+ priv->vport_meta_tag,
+ priv->vport_meta_mask);
+ } else {
flow_dv_translate_item_source_vport(matcher, key,
priv->vport_id, mask);
+ }
return 0;
}
"cannot get table");
return NULL;
}
+ DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+ table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
return &tbl_data->tbl;
}
if (he)
mlx5_hlist_unregister(tunnel_grp_hash, he);
DRV_LOG(DEBUG,
- "Table_id %#x tunnel %u group %u released.",
+ "Table_id %u tunnel %u group %u released.",
table_id,
tbl_data->tunnel ?
tbl_data->tunnel->tunnel_id : 0,
struct mlx5_flow_dv_matcher *ref,
union mlx5_flow_tbl_key *key,
struct mlx5_flow *dev_flow,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group_id,
struct rte_flow_error *error)
{
struct mlx5_cache_entry *entry;
.data = ref,
};
- tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
- key->domain, false, NULL, 0, 0, error);
+ /*
+ * tunnel offload API requires this registration for cases when
+ * tunnel match rule was inserted before tunnel set rule.
+ */
+ tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+ key->direction, key->domain,
+ dev_flow->external, tunnel,
+ group_id, 0, error);
if (!tbl)
return -rte_errno; /* No need to refill the error info */
tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
uint64_t action_flags = 0;
MLX5_ASSERT(wks);
- rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+ rss_desc = &wks->rss_desc;
sample_act = &res->sample_act;
sample_idx = &res->sample_idx;
sample_action = (const struct rte_flow_action_sample *)action->conf;
uint32_t hrxq_idx;
MLX5_ASSERT(wks);
- rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+ rss_desc = &wks->rss_desc;
if (num_of_dest > 1) {
if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
/* Handle QP action for mirroring */
/* First ASO flow hit allocation - starting ASO data-path. */
int ret = mlx5_aso_queue_start(priv->sh);
- if (ret)
+ if (ret) {
+ mlx5_free(pools);
return ret;
+ }
}
mng->n = resize;
mng->pools = pools;
.skip_scale = !!dev_flow->skip_scale,
};
- MLX5_ASSERT(wks);
- rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+ if (!wks)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to push flow workspace");
+ rss_desc = &wks->rss_desc;
memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, tunnel, attr, items, actions);
ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
- grp_info, error);
+ &grp_info, error);
if (ret)
return ret;
dev_flow->dv.group = table;
/*
* do not add decap action if match rule drops packet
* HW rejects rules with decap & drop
+ *
+ * if tunnel match rule was inserted before matching tunnel set
+ * rule flow table used in the match rule must be registered.
+ * current implementation handles that in the
+ * flow_dv_matcher_register() at the function end.
*/
bool add_decap = true;
const struct rte_flow_action *ptr = actions;
- struct mlx5_flow_tbl_resource *tbl;
for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
}
- /*
- * bind table_id with <group, table> for tunnel match rule.
- * Tunnel set rule establishes that bind in JUMP action handler.
- * Required for scenario when application creates tunnel match
- * rule before tunnel set rule.
- */
- tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
- attr->transfer,
- !!dev_flow->external, tunnel,
- attr->group, 0, error);
- if (!tbl)
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "cannot register tunnel group");
}
for (; !actions_end ; actions++) {
const struct rte_flow_action_queue *queue;
ret = mlx5_flow_group_to_table(dev, tunnel,
jump_group,
&table,
- grp_info, error);
+ &grp_info, error);
if (ret)
return ret;
tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
NULL, "item not supported");
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
- flow_dv_translate_item_port_id(dev, match_mask,
- match_value, items);
+ flow_dv_translate_item_port_id
+ (dev, match_mask, match_value, items, attr);
last_item = MLX5_FLOW_ITEM_PORT_ID;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
(priv->representor || priv->master)) {
if (flow_dv_translate_item_port_id(dev, match_mask,
- match_value, NULL))
+ match_value, NULL, attr))
return -rte_errno;
}
#ifdef RTE_LIBRTE_MLX5_DEBUG
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
tbl_key.table_id = dev_flow->dv.group;
- if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+ if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+ tunnel, attr->group, error))
return -rte_errno;
return 0;
}
int err;
int idx;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
- struct mlx5_flow_rss_desc *rss_desc =
- &wks->rss_desc[!!wks->flow_nested_idx];
+ struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
MLX5_ASSERT(wks);
if (rss_desc->shared_rss) {
MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
dh->rix_srss = rss_desc->shared_rss;
}
- for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
+ for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
dev_flow = &wks->flows[idx];
dv = &dev_flow->dv;
dh = dev_flow->handle;
.match_mask = (void *)&mask,
};
void *actions[2] = { 0 };
- struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
+ struct mlx5_flow_tbl_resource *tbl = NULL;
struct mlx5_devx_obj *dcs = NULL;
void *matcher = NULL;
void *flow = NULL;
- int i, ret = -1;
+ int ret = -1;
tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
if (!tbl)
goto err;
- dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
- NULL, 0, 0, NULL);
- if (!dest_tbl)
- goto err;
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
if (!dcs)
goto err;
&actions[0]);
if (ret)
goto err;
- ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
- (dest_tbl->obj, &actions[1]);
- if (ret)
- goto err;
+ actions[1] = priv->drop_queue.hrxq->action;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
&matcher);
"support detection");
ret = 0;
}
- for (i = 0; i < 2; i++) {
- if (actions[i])
- claim_zero(mlx5_flow_os_destroy_flow_action
- (actions[i]));
- }
+ if (actions[0])
+ claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
if (matcher)
claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
if (tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
- if (dest_tbl)
- flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
if (dcs)
claim_zero(mlx5_devx_cmd_destroy(dcs));
return ret;