return hrxq;
}
+/**
+ * Release sample sub-action resources (hrxq, encap/decap, port-id action,
+ * tag and flow counter) and clear the corresponding indexes in @p act_res.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] act_res
+ * Pointer to sample sub action resource; released indexes are reset to 0.
+ */
+static void
+flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_sub_actions_idx *act_res)
+{
+ if (act_res->rix_hrxq) {
+ mlx5_hrxq_release(dev, act_res->rix_hrxq);
+ act_res->rix_hrxq = 0;
+ }
+ if (act_res->rix_encap_decap) {
+ flow_dv_encap_decap_resource_release(dev,
+ act_res->rix_encap_decap);
+ act_res->rix_encap_decap = 0;
+ }
+ if (act_res->rix_port_id_action) {
+ flow_dv_port_id_action_resource_release(dev,
+ act_res->rix_port_id_action);
+ act_res->rix_port_id_action = 0;
+ }
+ if (act_res->rix_tag) {
+ flow_dv_tag_release(dev, act_res->rix_tag);
+ act_res->rix_tag = 0;
+ }
+ if (act_res->cnt) {
+ flow_dv_counter_release(dev, act_res->cnt);
+ act_res->cnt = 0;
+ }
+}
+
/**
* Find existing sample resource or create and register a new one.
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in] attr
- * Attributes of flow that includes this item.
* @param[in] resource
* Pointer to sample resource.
* @parm[in, out] dev_flow
*/
static int
flow_dv_sample_resource_register(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
struct mlx5_flow_dv_sample_resource *resource,
struct mlx5_flow *dev_flow,
void **sample_dv_actions,
uint32_t idx = 0;
const uint32_t next_ft_step = 1;
uint32_t next_ft_id = resource->ft_id + next_ft_step;
+ uint8_t is_egress = 0;
+ uint8_t is_transfer = 0;
/* Lookup a matching resource from cache. */
ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
__ATOMIC_RELAXED);
dev_flow->handle->dvh.rix_sample = idx;
dev_flow->dv.sample_res = cache_resource;
+ /*
+ * Existing sample action should release the prepared
+ * sub-actions reference counter.
+ */
+ flow_dv_sample_sub_actions_release(dev,
+ &resource->sample_idx);
return 0;
}
}
"cannot allocate resource memory");
*cache_resource = *resource;
/* Create normal path table level */
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ is_transfer = 1;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ is_egress = 1;
tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
- attr->egress, attr->transfer,
+ is_egress, is_transfer,
dev_flow->external, NULL, 0, 0, error);
if (!tbl) {
rte_flow_error_set(error, ENOMEM,
__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
return 0;
error:
- if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
- if (cache_resource->default_miss)
- claim_zero(mlx5_glue->destroy_flow_action
+ if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
+ cache_resource->default_miss)
+ claim_zero(mlx5_glue->destroy_flow_action
(cache_resource->default_miss));
- } else {
- if (cache_resource->sample_idx.rix_hrxq &&
- !mlx5_hrxq_release(dev,
- cache_resource->sample_idx.rix_hrxq))
- cache_resource->sample_idx.rix_hrxq = 0;
- if (cache_resource->sample_idx.rix_tag &&
- !flow_dv_tag_release(dev,
- cache_resource->sample_idx.rix_tag))
- cache_resource->sample_idx.rix_tag = 0;
- if (cache_resource->sample_idx.cnt) {
- flow_dv_counter_release(dev,
- cache_resource->sample_idx.cnt);
- cache_resource->sample_idx.cnt = 0;
- }
- }
+ else
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx);
if (cache_resource->normal_path_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev),
cache_resource->normal_path_tbl);
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in] attr
- * Attributes of flow that includes this item.
* @param[in] resource
* Pointer to destination array resource.
* @parm[in, out] dev_flow
*/
static int
flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
struct mlx5_flow_dv_dest_array_resource *resource,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
__ATOMIC_RELAXED);
dev_flow->handle->dvh.rix_dest_array = idx;
dev_flow->dv.dest_array_res = cache_resource;
+ /*
+ * Existing destination array action should release the prepared
+ * sub-actions reference counter.
+ */
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ flow_dv_sample_sub_actions_release(dev,
+ &resource->sample_idx[idx]);
return 0;
}
}
NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
- if (attr->transfer)
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
- else if (attr->ingress)
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
domain = sh->rx_domain;
else
domain = sh->tx_domain;
res->set_action = action_ctx.set_action;
} else if (attr->ingress) {
res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ } else {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
}
return 0;
}
* Pointer to rte_eth_dev structure.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
- * @param[in] attr
- * Pointer to the flow attributes.
* @param[in] num_of_dest
* The num of destination.
* @param[in, out] res
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
uint32_t num_of_dest,
struct mlx5_flow_dv_sample_resource *res,
struct mlx5_flow_dv_dest_array_resource *mdest_res,
memcpy(&mdest_res->sample_act[0], &res->sample_act,
sizeof(struct mlx5_flow_sub_actions_list));
mdest_res->num_of_dest = num_of_dest;
- if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
+ if (flow_dv_dest_array_resource_register(dev, mdest_res,
dev_flow, error))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "can't create sample "
"action");
} else {
- if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
+ if (flow_dv_sample_resource_register(dev, res, dev_flow,
sample_actions, error))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
}
if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
ret = flow_dv_create_action_sample(dev,
- dev_flow, attr,
+ dev_flow,
num_of_dest,
&sample_res,
&mdest_res,
if (cache_resource->normal_path_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev),
cache_resource->normal_path_tbl);
- }
- if (cache_resource->sample_idx.rix_hrxq &&
- !mlx5_hrxq_release(dev,
- cache_resource->sample_idx.rix_hrxq))
- cache_resource->sample_idx.rix_hrxq = 0;
- if (cache_resource->sample_idx.rix_tag &&
- !flow_dv_tag_release(dev,
- cache_resource->sample_idx.rix_tag))
- cache_resource->sample_idx.rix_tag = 0;
- if (cache_resource->sample_idx.cnt) {
- flow_dv_counter_release(dev,
- cache_resource->sample_idx.cnt);
- cache_resource->sample_idx.cnt = 0;
- }
- if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
&priv->sh->sample_action_list, idx,
cache_resource, next);
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_dest_array_resource *cache_resource;
- struct mlx5_flow_sub_actions_idx *mdest_act_res;
uint32_t idx = handle->dvh.rix_dest_array;
uint32_t i = 0;
if (cache_resource->action)
claim_zero(mlx5_glue->destroy_flow_action
(cache_resource->action));
- for (; i < cache_resource->num_of_dest; i++) {
- mdest_act_res = &cache_resource->sample_idx[i];
- if (mdest_act_res->rix_hrxq) {
- mlx5_hrxq_release(dev,
- mdest_act_res->rix_hrxq);
- mdest_act_res->rix_hrxq = 0;
- }
- if (mdest_act_res->rix_encap_decap) {
- flow_dv_encap_decap_resource_release(dev,
- mdest_act_res->rix_encap_decap);
- mdest_act_res->rix_encap_decap = 0;
- }
- if (mdest_act_res->rix_port_id_action) {
- flow_dv_port_id_action_resource_release(dev,
- mdest_act_res->rix_port_id_action);
- mdest_act_res->rix_port_id_action = 0;
- }
- if (mdest_act_res->rix_tag) {
- flow_dv_tag_release(dev,
- mdest_act_res->rix_tag);
- mdest_act_res->rix_tag = 0;
- }
- }
+ for (; i < cache_resource->num_of_dest; i++)
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx[i]);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
&priv->sh->dest_array_list, idx,
cache_resource, next);