if (reg < 0)
return reg;
+ MLX5_ASSERT(reg != REG_NON);
/*
* In datapath code there is no endianness
* conversions for performance reasons, all
reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return reg;
+ if (reg == REG_NON)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "unavalable extended metadata register");
if (reg == REG_B)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return reg;
+ if (reg == REG_NON)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "unavalable extended metadata register");
if (reg != REG_A && reg != REG_B) {
struct mlx5_priv *priv = dev->data->dev_private;
cache_resource = container_of(entry,
struct mlx5_flow_dv_encap_decap_resource,
entry);
- if (resource->entry.key == cache_resource->entry.key &&
- resource->reformat_type == cache_resource->reformat_type &&
+ if (resource->reformat_type == cache_resource->reformat_type &&
resource->ft_type == cache_resource->ft_type &&
resource->flags == cache_resource->flags &&
resource->size == cache_resource->size &&
.error = error,
.data = resource,
};
+ uint64_t key64;
resource->flags = dev_flow->dv.group ? 0 : 1;
- resource->entry.key = __rte_raw_cksum(&encap_decap_key.v32,
- sizeof(encap_decap_key.v32), 0);
+ key64 = __rte_raw_cksum(&encap_decap_key.v32,
+ sizeof(encap_decap_key.v32), 0);
if (resource->reformat_type !=
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
resource->size)
- resource->entry.key = __rte_raw_cksum(resource->buf,
- resource->size,
- resource->entry.key);
- entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
- &ctx);
+ key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
+ entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
.error = error,
.data = resource,
};
+ uint64_t key64;
resource->flags = dev_flow->dv.group ? 0 :
MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
return rte_flow_error_set(error, EOVERFLOW,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many modify header items");
- resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
- entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
+ key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
+ entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
} else {
tunnel = NULL;
}
+ if (tunnel && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "decap not supported "
+ "for VF representor");
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, tunnel, attr, items, actions);
ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
- if (!attr->group)
+ if (!attr->transfer && !attr->group)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"not enough memory to create flow handle");
return NULL;
}
- MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
+ MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
dev_flow = &wks->flows[wks->flow_idx++];
+ memset(dev_flow, 0, sizeof(*dev_flow));
dev_flow->handle = dev_handle;
dev_flow->handle_idx = handle_idx;
/*
*/
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
MLX5_ST_SZ_BYTES(fte_match_set_misc4);
- /*
- * The matching value needs to be cleared to 0 before using. In the
- * past, it will be automatically cleared when using rte_*alloc
- * API. The time consumption will be almost the same as before.
- */
- memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
dev_flow->ingress = attr->ingress;
dev_flow->dv.transfer = attr->transfer;
return dev_flow;
reg = flow_dv_get_metadata_reg(dev, attr, NULL);
if (reg < 0)
return;
+ MLX5_ASSERT(reg != REG_NON);
/*
* In datapath code there is no endianness
* conversions for performance reasons, all
tbl_data->idx = idx;
tbl_data->tunnel = tt_prm->tunnel;
tbl_data->group_id = tt_prm->group_id;
- tbl_data->external = tt_prm->external;
+ tbl_data->external = !!tt_prm->external;
tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
tbl_data->is_egress = !!key.direction;
+ tbl_data->is_transfer = !!key.domain;
+ tbl_data->dummy = !!key.dummy;
+ tbl_data->table_id = key.table_id;
tbl = &tbl_data->tbl;
if (key.dummy)
return &tbl_data->entry;
return &tbl_data->entry;
}
+/*
+ * Hash-list match callback for the flow table cache.
+ *
+ * Compares the identity fields cached in the table entry against the
+ * lookup key decoded from @p key64. Presumably returns 0 on match and
+ * non-zero on mismatch, following the usual match-callback convention
+ * of mlx5_hlist — TODO confirm against the mlx5_hlist API contract.
+ *
+ * @param list    Hash list (unused here; identity lives in the entry).
+ * @param entry   Cached entry embedded in mlx5_flow_tbl_data_entry.
+ * @param key64   64-bit packed table key (mlx5_flow_tbl_key layout).
+ * @param cb_ctx  Caller context (unused).
+ */
+int
+flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+		     struct mlx5_hlist_entry *entry, uint64_t key64,
+		     void *cb_ctx __rte_unused)
+{
+	struct mlx5_flow_tbl_data_entry *tbl_data =
+		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+	union mlx5_flow_tbl_key key = { .v64 = key64 };
+
+	/* Any differing identity field makes the entry a non-match. */
+	return tbl_data->table_id != key.table_id ||
+	       tbl_data->dummy != key.dummy ||
+	       tbl_data->is_transfer != key.domain ||
+	       tbl_data->is_egress != key.direction;
+}
+
/**
* Get a flow table.
*
tbl_data->tunnel->tunnel_id : 0,
.group = tbl_data->group_id
};
- union mlx5_flow_tbl_key table_key = {
- .v64 = entry->key
- };
- uint32_t table_id = table_key.table_id;
+ uint32_t table_id = tbl_data->table_id;
tunnel_grp_hash = tbl_data->tunnel ?
tbl_data->tunnel->groups :
return NULL;
}
entry->idx = idx;
+ entry->tag_id = key;
ret = mlx5_flow_os_create_flow_action_tag(key,
&entry->action);
if (ret) {
return &entry->entry;
}
+/*
+ * Hash-list match callback for the tag action cache.
+ *
+ * A tag resource matches when its stored tag value equals the lookup
+ * key. Presumably returns 0 on match, non-zero otherwise, per the
+ * mlx5_hlist match-callback convention — TODO confirm with the
+ * mlx5_hlist API.
+ *
+ * @param list    Hash list (unused; the tag id is kept in the entry).
+ * @param entry   Cached entry embedded in mlx5_flow_dv_tag_resource.
+ * @param key     Tag value used as the hash key.
+ * @param cb_ctx  Caller context (unused).
+ */
+int
+flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
+		     struct mlx5_hlist_entry *entry, uint64_t key,
+		     void *cb_ctx __rte_unused)
+{
+	struct mlx5_flow_dv_tag_resource *tag =
+		container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+
+	return key != tag->tag_id;
+}
+
/**
* Find existing tag resource or create and register a new one.
*
struct mlx5_priv *priv = dev->data->dev_private;
if (cache_resource->verbs_action)
- claim_zero(mlx5_glue->destroy_flow_action
+ claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->verbs_action));
if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
if (cache_resource->default_miss)
- claim_zero(mlx5_glue->destroy_flow_action
+ claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->default_miss));
}
if (cache_resource->normal_path_tbl)
MLX5_ASSERT(cache_resource->action);
if (cache_resource->action)
- claim_zero(mlx5_glue->destroy_flow_action
+ claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
for (; i < cache_resource->num_of_dest; i++)
flow_dv_sample_sub_actions_release(dev,
*/
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
- uint32_t action_idx,
- struct mlx5_shared_action_rss *action,
- struct rte_flow_error *error)
+ uint32_t action_idx,
+ struct mlx5_shared_action_rss *action,
+ struct rte_flow_error *error)
{
struct mlx5_flow_rss_desc rss_desc = { 0 };
size_t i;
int err;
+ if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot setup indirection table");
+ }
memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = action->origin.queue;
rss_desc.queue_num = action->origin.queue_num;
/* Set non-zero value to indicate a shared RSS. */
rss_desc.shared_rss = action_idx;
+ rss_desc.ind_tbl = action->ind_tbl;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
error_hrxq_new:
err = rte_errno;
__flow_dv_action_rss_hrxqs_release(dev, action);
+ if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
+ action->ind_tbl = NULL;
rte_errno = err;
return -rte_errno;
}
"rss action number out of range");
goto error_rss_init;
}
- shared_action->queue = queue;
+ shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*shared_action->ind_tbl),
+ 0, SOCKET_ID_ANY);
+ if (!shared_action->ind_tbl) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ goto error_rss_init;
+ }
+ memcpy(queue, rss->queue, queue_size);
+ shared_action->ind_tbl->queues = queue;
+ shared_action->ind_tbl->queues_n = rss->queue_num;
origin = &shared_action->origin;
origin->func = rss->func;
origin->level = rss->level;
memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
origin->key = &shared_action->key[0];
origin->key_len = MLX5_RSS_HASH_KEY_LEN;
- memcpy(shared_action->queue, rss->queue, queue_size);
- origin->queue = shared_action->queue;
+ origin->queue = queue;
origin->queue_num = rss->queue_num;
if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
goto error_rss_init;
+ rte_spinlock_init(&shared_action->action_rss_sl);
__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
rte_spinlock_unlock(&priv->shared_act_sl);
return idx;
error_rss_init:
- if (shared_action)
+ if (shared_action) {
+ if (shared_action->ind_tbl)
+ mlx5_free(shared_action->ind_tbl);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
+ }
if (queue)
mlx5_free(queue);
return 0;
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
uint32_t old_refcnt = 1;
int remaining;
+ uint16_t *queue = NULL;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
"invalid shared action");
remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
if (remaining)
- return rte_flow_error_set(error, ETOOMANYREFS,
+ return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss hrxq has references");
+ queue = shared_rss->ind_tbl->queues;
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+ if (remaining)
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss indirection table has"
+ " references");
if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
0, 0, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED))
- return rte_flow_error_set(error, ETOOMANYREFS,
+ return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss has references");
- rte_free(shared_rss->queue);
+ mlx5_free(queue);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss =
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
- size_t i;
- int ret;
+ int ret = 0;
void *queue = NULL;
- const uint8_t *rss_key;
- uint32_t rss_key_len;
+ uint16_t *queue_old = NULL;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
if (!shared_rss)
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"cannot allocate resource memory");
- if (action_conf->key) {
- rss_key = action_conf->key;
- rss_key_len = action_conf->key_len;
+ memcpy(queue, action_conf->queue, queue_size);
+ MLX5_ASSERT(shared_rss->ind_tbl);
+ rte_spinlock_lock(&shared_rss->action_rss_sl);
+ queue_old = shared_rss->ind_tbl->queues;
+ ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
+ queue, action_conf->queue_num, true);
+ if (ret) {
+ mlx5_free(queue);
+ ret = rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot update indirection table");
} else {
- rss_key = rss_hash_default_key;
- rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ mlx5_free(queue_old);
+ shared_rss->origin.queue = queue;
+ shared_rss->origin.queue_num = action_conf->queue_num;
}
- for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
- uint32_t hrxq_idx;
- uint64_t hash_fields = mlx5_rss_hash_fields[i];
- int tunnel;
-
- for (tunnel = 0; tunnel < 2; tunnel++) {
- hrxq_idx = __flow_dv_action_rss_hrxq_lookup
- (dev, idx, hash_fields, tunnel);
- MLX5_ASSERT(hrxq_idx);
- ret = mlx5_hrxq_modify
- (dev, hrxq_idx,
- rss_key, rss_key_len,
- hash_fields,
- action_conf->queue, action_conf->queue_num);
- if (ret) {
- mlx5_free(queue);
- return rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "cannot update hash queue");
- }
- }
- }
- mlx5_free(shared_rss->queue);
- shared_rss->queue = queue;
- memcpy(shared_rss->queue, action_conf->queue, queue_size);
- shared_rss->origin.queue = shared_rss->queue;
- shared_rss->origin.queue_num = action_conf->queue_num;
- return 0;
+ rte_spinlock_unlock(&shared_rss->action_rss_sl);
+ return ret;
}
/**