flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
uint32_t encap_decap_idx);
+static int
+flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
+ uint32_t port_id);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
*/
static struct mlx5_hrxq *
flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- struct mlx5_flow_rss_desc *rss_desc,
- uint32_t *hrxq_idx)
+ struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint32_t *hrxq_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *dh = dev_flow->handle;
MLX5_ASSERT(rss_desc->queue_num);
*hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num);
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num);
if (!*hrxq_idx) {
*hrxq_idx = mlx5_hrxq_new
(dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num,
+ !!(dh->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
if (!*hrxq_idx)
return NULL;
}
return mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], *hrxq_idx);
}
+
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[in] resource
+ * Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct mlx5_flow_dv_dest_array_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
+ struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0;
+
+ /* Lookup a matching resource from cache. */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ sh->dest_array_list,
+ idx, cache_resource, next) {
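+ /* Match on destination count, table type and the sub-action lists. */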
+ if (resource->num_of_dest == cache_resource->num_of_dest &&
+ resource->ft_type == cache_resource->ft_type &&
+ !memcmp((void *)cache_resource->sample_act,
+ (void *)resource->sample_act,
+ (resource->num_of_dest *
+ sizeof(struct mlx5_flow_sub_actions_list)))) {
+ DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ dev_flow->handle->dvh.rix_dest_array = idx;
+ dev_flow->dv.dest_array_res = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new destination array resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &dev_flow->handle->dvh.rix_dest_array);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
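+ /* Select the steering domain according to the flow attributes. */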
+ if (attr->transfer)
+ domain = sh->fdb_domain;
+ else if (attr->ingress)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
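+ /* Build one rdma-core destination attribute per destination. */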
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5dv_dr_action_dest_attr),
+ 0, SOCKET_ID_ANY);
+ if (!dest_attr[idx]) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ goto error;
+ }
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
+ sample_act = &resource->sample_act[idx];
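+ /*
+ * A destination is either an Rx queue, a port id combined with
+ * packet reformat (encap), or a plain port id.
+ */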
+ if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
+ dest_attr[idx]->dest = sample_act->dr_queue_action;
+ } else if (sample_act->action_flags ==
+ (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
+ dest_attr[idx]->dest_reformat = &dest_reformat[idx];
+ dest_attr[idx]->dest_reformat->reformat =
+ sample_act->dr_encap_action;
+ dest_attr[idx]->dest_reformat->dest =
+ sample_act->dr_port_id_action;
+ } else if (sample_act->action_flags ==
+ MLX5_FLOW_ACTION_PORT_ID) {
+ dest_attr[idx]->dest = sample_act->dr_port_id_action;
+ }
+ }
+ /* Create a dest array action. */
+ cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+ (domain,
+ cache_resource->num_of_dest,
+ dest_attr);
+ if (!cache_resource->action) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create destination array action");
+ goto error;
+ }
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &sh->dest_array_list,
+ dev_flow->handle->dvh.rix_dest_array, cache_resource,
+ next);
+ dev_flow->dv.dest_array_res = cache_resource;
+ DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ mlx5_free(dest_attr[idx]);
+ return 0;
+error:
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ struct mlx5_flow_sub_actions_idx *act_res =
+ &cache_resource->sample_idx[idx];
+ if (act_res->rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ act_res->rix_hrxq))
+ act_res->rix_hrxq = 0;
+ if (act_res->rix_encap_decap &&
+ !flow_dv_encap_decap_resource_release(dev,
+ act_res->rix_encap_decap))
+ act_res->rix_encap_decap = 0;
+ if (act_res->rix_port_id_action &&
+ !flow_dv_port_id_action_resource_release(dev,
+ act_res->rix_port_id_action))
+ act_res->rix_port_id_action = 0;
+ if (dest_attr[idx])
+ mlx5_free(dest_attr[idx]);
+ }
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ dev_flow->handle->dvh.rix_dest_array);
+ dev_flow->handle->dvh.rix_dest_array = 0;
+ return -rte_errno;
+}
+
/**
* Convert Sample action to DV specification.
*
* Pointer to the mlx5_flow.
* @param[in] attr
* Pointer to the flow attributes.
+ * @param[in, out] num_of_dest
+ * Pointer to the number of destinations.
* @param[in, out] sample_actions
* Pointer to sample actions list.
* @param[in, out] res
const struct rte_flow_action *action,
struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
+ uint32_t *num_of_dest,
void **sample_actions,
struct mlx5_flow_dv_sample_resource *res,
struct rte_flow_error *error)
sample_idx->rix_hrxq = hrxq_idx;
sample_actions[sample_act->actions_num++] =
hrxq->action;
+ (*num_of_dest)++;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
if (action_flags & MLX5_FLOW_ACTION_MARK)
dev_flow->handle->rix_hrxq = hrxq_idx;
uint32_t tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(sub_actions->conf))->id);
+
dev_flow->handle->mark = 1;
pre_rix = dev_flow->handle->dvh.rix_tag;
/* Save the mark resource before sample */
action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
}
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ {
+ struct mlx5_flow_dv_port_id_action_resource
+ port_id_resource;
+ uint32_t port_id = 0;
+
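+ /*
+ * Translate the PORT_ID sub-action into its own port id action
+ * resource to be used as a mirror destination.
+ */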
+ memset(&port_id_resource, 0, sizeof(port_id_resource));
+ /* Save the port id resource before sample */
+ pre_rix = dev_flow->handle->rix_port_id_action;
+ pre_r = dev_flow->dv.port_id_action;
+ if (flow_dv_translate_action_port_id(dev, sub_actions,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ sample_idx->rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_port_id_action;
+ /* Recover the port id resource after sample */
+ dev_flow->dv.port_id_action = pre_r;
+ dev_flow->handle->rix_port_id_action = pre_rix;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
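+ /*
+ * Translate the RAW_ENCAP sub-action into an L2 encap (packet
+ * reformat) action for the sample path.
+ */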
+ /* Save the encap resource before sample */
+ pre_rix = dev_flow->handle->dvh.rix_encap_decap;
+ pre_r = dev_flow->dv.encap_decap;
+ if (flow_dv_create_action_l2_encap(dev, sub_actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ sample_idx->rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_encap_action;
+ /* Recover the encap resource after sample */
+ dev_flow->dv.encap_decap = pre_r;
+ dev_flow->handle->dvh.rix_encap_decap = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ break;
default:
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
* Pointer to the mlx5_flow.
* @param[in] attr
* Pointer to the flow attributes.
+ * @param[in] num_of_dest
+ * The number of destinations.
* @param[in, out] res
* Pointer to sample resource.
+ * @param[in, out] mdest_res
+ * Pointer to destination array resource.
* @param[in] sample_actions
* Pointer to sample path actions list.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
* @param[out] error
* Pointer to the error structure.
*
*/
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- struct mlx5_flow_dv_sample_resource *res,
- void **sample_actions,
- struct rte_flow_error *error)
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ uint32_t num_of_dest,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct mlx5_flow_dv_dest_array_resource *mdest_res,
+ void **sample_actions,
+ uint64_t action_flags,
+ struct rte_flow_error *error)
{
- if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
- sample_actions, error))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "can't create sample action");
+ struct mlx5_priv *priv = dev->data->dev_private;
+ /* Update normal path action resource into the last index of array. */
+ uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
+ struct mlx5_flow_sub_actions_list *sample_act =
+ &mdest_res->sample_act[dest_index];
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
+ uint32_t normal_idx = 0;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ if (num_of_dest > 1) {
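+ /*
+ * With more than one destination the sample action acts as a
+ * mirror: the normal path destination is combined with the
+ * sample destination into one destination array action.
+ */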
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ /* Handle QP action for mirroring */
+ hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create rx queue");
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
+ sample_act->dr_queue_action = hrxq->action;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ }
+ sample_act->actions_num = normal_idx;
+ /* Update sample action resource into the first index of array. */
+ mdest_res->ft_type = res->ft_type;
+ memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
+ sizeof(struct mlx5_flow_sub_actions_idx));
+ memcpy(&mdest_res->sample_act[0], &res->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list));
+ mdest_res->num_of_dest = num_of_dest;
+ if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
+ dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create sample "
+ "action");
+ } else {
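+ /* Single destination: register a plain sample action. */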
+ if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
+ sample_actions, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create sample action");
+ }
return 0;
}
void *match_value = dev_flow->dv.value.buf;
uint8_t next_protocol = 0xff;
struct rte_vlan_hdr vlan = { 0 };
+ struct mlx5_flow_dv_dest_array_resource mdest_res;
struct mlx5_flow_dv_sample_resource sample_res;
void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ struct mlx5_flow_sub_actions_list *sample_act;
uint32_t sample_act_pos = UINT32_MAX;
+ uint32_t num_of_dest = 0;
+ int tmp_actions_n = 0;
uint32_t table;
int ret = 0;
+ memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ /* Update normal path action resource into the last index of array. */
+ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
!!priv->fdb_def_rule, &table, error);
if (ret)
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
rss_desc->queue[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
rss = actions->conf;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
dev_flow->dv.encap_decap->action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
ret = flow_dv_translate_action_sample(dev,
actions,
dev_flow, attr,
+ &num_of_dest,
sample_actions,
&sample_res,
error);
return ret;
actions_n++;
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ /* Put encap action into the group if used together with port id. */
+ if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
NULL,
"cannot create counter"
" object.");
- dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.actions[actions_n] =
(flow_dv_counter_get_by_idx(dev,
flow->counter, NULL))->action;
+ actions_n++;
}
if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
ret = flow_dv_create_action_sample(dev,
dev_flow, attr,
+ num_of_dest,
&sample_res,
+ &mdest_res,
sample_actions,
+ action_flags,
error);
if (ret < 0)
return rte_flow_error_set
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"cannot create sample action");
- dev_flow->dv.actions[sample_act_pos] =
+ if (num_of_dest > 1) {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.dest_array_res->action;
+ } else {
+ dev_flow->dv.actions[sample_act_pos] =
dev_flow->dv.sample_res->verbs_action;
+ }
}
break;
default:
modify_action_position == UINT32_MAX)
modify_action_position = actions_n++;
}
+ /*
+ * For multiple destinations (sample action with ratio=1), the encap
+ * action and the port id action are combined into the destination
+ * array action, so the original actions must be removed from the
+ * flow and only the sample action is used instead.
+ */
+ if (num_of_dest > 1 && sample_act->dr_port_id_action) {
+ int i;
+ void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+
+ for (i = 0; i < actions_n; i++) {
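+ /* Skip the actions already grouped into the destination array. */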
+ if ((sample_act->dr_encap_action &&
+ sample_act->dr_encap_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_port_id_action &&
+ sample_act->dr_port_id_action ==
+ dev_flow->dv.actions[i]))
+ continue;
+ temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
+ }
+ memcpy((void *)dev_flow->dv.actions,
+ (void *)temp_actions,
+ tmp_actions_n * sizeof(void *));
+ actions_n = tmp_actions_n;
+ }
dev_flow->dv.actions_n = actions_n;
dev_flow->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
dv->actions[n++] = drop_hrxq->action;
}
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
- !dv_h->rix_sample) {
+ !dv_h->rix_sample && !dv_h->rix_dest_array) {
struct mlx5_hrxq *hrxq;
uint32_t hrxq_idx;
struct mlx5_flow_rss_desc *rss_desc =
*/
static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+ uint32_t port_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->rix_port_id_action;
+ uint32_t idx = port_id;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
idx);
flow_dv_jump_tbl_resource_release(dev, handle);
break;
case MLX5_FLOW_FATE_PORT_ID:
- flow_dv_port_id_action_resource_release(dev, handle);
+ flow_dv_port_id_action_resource_release(dev,
+ handle->rix_port_id_action);
break;
case MLX5_FLOW_FATE_DEFAULT_MISS:
flow_dv_default_miss_resource_release(dev);
return 1;
}
+/**
+ * Release a destination array resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_flow_sub_actions_idx *mdest_act_res;
+ uint32_t idx = handle->dvh.rix_dest_array;
+ uint32_t i = 0;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ idx);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->action);
+ DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
+ if (cache_resource->action)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->action));
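+ /* Release the resources held by each destination. */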
+ for (; i < cache_resource->num_of_dest; i++) {
+ mdest_act_res = &cache_resource->sample_idx[i];
+ if (mdest_act_res->rix_hrxq) {
+ mlx5_hrxq_release(dev,
+ mdest_act_res->rix_hrxq);
+ mdest_act_res->rix_hrxq = 0;
+ }
+ if (mdest_act_res->rix_encap_decap) {
+ flow_dv_encap_decap_resource_release(dev,
+ mdest_act_res->rix_encap_decap);
+ mdest_act_res->rix_encap_decap = 0;
+ }
+ if (mdest_act_res->rix_port_id_action) {
+ flow_dv_port_id_action_resource_release(dev,
+ mdest_act_res->rix_port_id_action);
+ mdest_act_res->rix_port_id_action = 0;
+ }
+ if (mdest_act_res->rix_tag) {
+ flow_dv_tag_release(dev,
+ mdest_act_res->rix_tag);
+ mdest_act_res->rix_tag = 0;
+ }
+ }
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &priv->sh->dest_array_list, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
+ DRV_LOG(DEBUG, "destination array resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Remove the flow from the NIC but keeps it in memory.
* Lock free, (mutex should be acquired by caller).
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.rix_sample)
flow_dv_sample_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_dest_array)
+ flow_dv_dest_array_resource_release(dev, dev_handle);
if (dev_handle->dvh.rix_encap_decap)
flow_dv_encap_decap_resource_release(dev,
dev_handle->dvh.rix_encap_decap);