+flow_dv_sample_resource_register(struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_sample_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+ struct mlx5_cache_entry *entry;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ /* Lookup key and error sink consumed by the cache list callbacks. */
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ .data = resource,
+ };
+
+ /* Reuse a matching cached sample resource or create a new one. */
+ entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
+ if (!entry)
+ return -rte_errno;
+ cache_resource = container_of(entry, typeof(*cache_resource), entry);
+ /* Publish the resolved resource index/pointer to the device flow. */
+ dev_flow->handle->dvh.rix_sample = cache_resource->idx;
+ dev_flow->dv.sample_res = cache_resource;
+ return 0;
+}
+
+int
+flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_dest_array_resource *cache_resource =
+ container_of(entry, typeof(*cache_resource), entry);
+ uint32_t idx = 0;
+
+ /* Match on destination count, table type and the raw sub-actions. */
+ if (resource->num_of_dest == cache_resource->num_of_dest &&
+ resource->ft_type == cache_resource->ft_type &&
+ !memcmp((void *)cache_resource->sample_act,
+ (void *)resource->sample_act,
+ (resource->num_of_dest *
+ sizeof(struct mlx5_flow_sub_actions_list)))) {
+ /*
+ * The cached entry is reused, so release the sub-actions
+ * reference counters that were prepared for the new resource.
+ */
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ flow_dv_sample_sub_actions_release(dev,
+ &resource->sample_idx[idx]);
+ /* 0 means the entry matches. */
+ return 0;
+ }
+ /* Non-zero means no match. */
+ return 1;
+}
+
+struct mlx5_cache_entry *
+flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+ struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
+ struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0, res_idx = 0;
+ struct rte_flow_error *error = ctx->error;
+
+ /* Register new destination array resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &res_idx);
+ if (!cache_resource) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ *cache_resource = *resource;
+ /* Select the DR domain matching the resource's table type. */
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ domain = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ /* Build one mlx5dv destination attribute per sub-action. */
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5dv_dr_action_dest_attr),
+ 0, SOCKET_ID_ANY);
+ if (!dest_attr[idx]) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ goto error;
+ }
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
+ sample_act = &resource->sample_act[idx];
+ if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
+ dest_attr[idx]->dest = sample_act->dr_queue_action;
+ } else if (sample_act->action_flags ==
+ (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
+ /* Port-id + encap pair becomes a reformat destination. */
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
+ dest_attr[idx]->dest_reformat = &dest_reformat[idx];
+ dest_attr[idx]->dest_reformat->reformat =
+ sample_act->dr_encap_action;
+ dest_attr[idx]->dest_reformat->dest =
+ sample_act->dr_port_id_action;
+ } else if (sample_act->action_flags ==
+ MLX5_FLOW_ACTION_PORT_ID) {
+ dest_attr[idx]->dest = sample_act->dr_port_id_action;
+ }
+ }
+ /* Create a destination array action. */
+ cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+ (domain,
+ cache_resource->num_of_dest,
+ dest_attr);
+ if (!cache_resource->action) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create destination array action");
+ goto error;
+ }
+ cache_resource->idx = res_idx;
+ cache_resource->dev = dev;
+ /* The temporary attributes are no longer needed after creation. */
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ mlx5_free(dest_attr[idx]);
+ return &cache_resource->entry;
+error:
+ /* Roll back the sub-action references and temporary attributes. */
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ struct mlx5_flow_sub_actions_idx *act_res =
+ &cache_resource->sample_idx[idx];
+ if (act_res->rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ act_res->rix_hrxq))
+ act_res->rix_hrxq = 0;
+ if (act_res->rix_encap_decap &&
+ !flow_dv_encap_decap_resource_release(dev,
+ act_res->rix_encap_decap))
+ act_res->rix_encap_decap = 0;
+ if (act_res->rix_port_id_action &&
+ !flow_dv_port_id_action_resource_release(dev,
+ act_res->rix_port_id_action))
+ act_res->rix_port_id_action = 0;
+ if (dest_attr[idx])
+ mlx5_free(dest_attr[idx]);
+ }
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
+ return NULL;
+}
+
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] resource
+ * Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_dest_array_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_cache_entry *entry;
+ /* Lookup key and error sink consumed by the cache list callbacks. */
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ .data = resource,
+ };
+
+ entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
+ if (!entry)
+ return -rte_errno;
+ cache_resource = container_of(entry, typeof(*cache_resource), entry);
+ /* Publish the resolved resource index/pointer to the device flow. */
+ dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
+ dev_flow->dv.dest_array_res = cache_resource;
+ return 0;
+}
+
+/**
+ * Convert Sample action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to action structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in, out] num_of_dest
+ * Pointer to the num of destination.
+ * @param[in, out] sample_actions
+ * Pointer to sample actions list.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate_action_sample(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ uint32_t *num_of_dest,
+ void **sample_actions,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_sample *sample_action;
+ const struct rte_flow_action *sub_actions;
+ const struct rte_flow_action_queue *queue;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5_flow_sub_actions_idx *sample_idx;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint64_t action_flags = 0;
+
+ MLX5_ASSERT(wks);
+ rss_desc = &wks->rss_desc;
+ sample_act = &res->sample_act;
+ sample_idx = &res->sample_idx;
+ sample_action = (const struct rte_flow_action_sample *)action->conf;
+ res->ratio = sample_action->ratio;
+ sub_actions = sample_action->actions;
+ /* Translate each sub-action in the sample list to a DR action. */
+ for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
+ int type = sub_actions->type;
+ uint32_t pre_rix = 0;
+ void *pre_r;
+ switch (type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ queue = sub_actions->conf;
+ rss_desc->queue_num = 1;
+ rss_desc->queue[0] = queue->index;
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create fate queue");
+ sample_act->dr_queue_action = hrxq->action;
+ sample_idx->rix_hrxq = hrxq_idx;
+ sample_actions[sample_act->actions_num++] =
+ hrxq->action;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ uint32_t tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (sub_actions->conf))->id);
+
+ dev_flow->handle->mark = 1;
+ pre_rix = dev_flow->handle->dvh.rix_tag;
+ /* Save the mark resource before sample */
+ pre_r = dev_flow->dv.tag_resource;
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ sample_act->dr_tag_action =
+ dev_flow->dv.tag_resource->action;
+ sample_idx->rix_tag =
+ dev_flow->handle->dvh.rix_tag;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_tag_action;
+ /* Recover the mark resource after sample */
+ dev_flow->dv.tag_resource = pre_r;
+ dev_flow->handle->dvh.rix_tag = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ {
+ uint32_t counter;
+
+ counter = flow_dv_translate_create_counter(dev,
+ dev_flow, sub_actions->conf, 0);
+ if (!counter)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create counter"
+ " object.");
+ sample_idx->cnt = counter;
+ sample_act->dr_cnt_action =
+ (flow_dv_counter_get_by_idx(dev,
+ counter, NULL))->action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_cnt_action;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ {
+ struct mlx5_flow_dv_port_id_action_resource
+ port_id_resource;
+ uint32_t port_id = 0;
+
+ memset(&port_id_resource, 0, sizeof(port_id_resource));
+ /* Save the port id resource before sample */
+ pre_rix = dev_flow->handle->rix_port_id_action;
+ pre_r = dev_flow->dv.port_id_action;
+ if (flow_dv_translate_action_port_id(dev, sub_actions,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ sample_idx->rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_port_id_action;
+ /* Recover the port id resource after sample */
+ dev_flow->dv.port_id_action = pre_r;
+ dev_flow->handle->rix_port_id_action = pre_rix;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Save the encap resource before sample */
+ pre_rix = dev_flow->handle->dvh.rix_encap_decap;
+ pre_r = dev_flow->dv.encap_decap;
+ if (flow_dv_create_action_l2_encap(dev, sub_actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ sample_idx->rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_encap_action;
+ /* Recover the encap resource after sample */
+ dev_flow->dv.encap_decap = pre_r;
+ dev_flow->handle->dvh.rix_encap_decap = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Not support for sampler action");
+ }
+ }
+ sample_act->action_flags = action_flags;
+ res->ft_id = dev_flow->dv.group;
+ if (attr->transfer) {
+ /* For FDB, build the register C0 set-action metadata. */
+ union {
+ uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
+ uint64_t set_action;
+ } action_ctx = { .set_action = 0 };
+
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ MLX5_SET(set_action_in, action_ctx.action_in, action_type,
+ MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, action_ctx.action_in, field,
+ MLX5_MODI_META_REG_C_0);
+ MLX5_SET(set_action_in, action_ctx.action_in, data,
+ priv->vport_meta_tag);
+ res->set_action = action_ctx.set_action;
+ } else if (attr->ingress) {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ } else {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
+ }
+ return 0;
+}
+
+/**
+ * Create and register the sample resource, or a destination array resource
+ * when the sample action mirrors to multiple destinations.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] num_of_dest
+ * The num of destination.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[in, out] mdest_res
+ * Pointer to destination array resource.
+ * @param[in] sample_actions
+ * Pointer to sample path actions list.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_create_action_sample(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ uint32_t num_of_dest,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct mlx5_flow_dv_dest_array_resource *mdest_res,
+ void **sample_actions,
+ uint64_t action_flags,
+ struct rte_flow_error *error)
+{
+ /* update normal path action resource into last index of array */
+ uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
+ struct mlx5_flow_sub_actions_list *sample_act =
+ &mdest_res->sample_act[dest_index];
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint32_t normal_idx = 0;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ MLX5_ASSERT(wks);
+ rss_desc = &wks->rss_desc;
+ /* Multiple destinations: build the mirror (dest array) resource. */
+ if (num_of_dest > 1) {
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ /* Handle QP action for mirroring */
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create rx queue");
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
+ sample_act->dr_queue_action = hrxq->action;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ }
+ sample_act->actions_num = normal_idx;
+ /* update sample action resource into first index of array */
+ mdest_res->ft_type = res->ft_type;
+ memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
+ sizeof(struct mlx5_flow_sub_actions_idx));
+ memcpy(&mdest_res->sample_act[0], &res->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list));
+ mdest_res->num_of_dest = num_of_dest;
+ if (flow_dv_dest_array_resource_register(dev, mdest_res,
+ dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create sample "
+ "action");
+ } else {
+ /* Single destination: register a plain sample resource. */
+ res->sub_actions = sample_actions;
+ if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create sample action");
+ }
+ return 0;
+}
+
+/**
+ * Remove an ASO age action from age actions list.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age
+ * Pointer to the aso age action handler.
+ */
+static void
+flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action *age)
+{
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param = &age->age_params;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint16_t expected = AGE_CANDIDATE;
+
+ age_info = GET_PORT_AGE_INFO(priv);
+ /*
+ * If the state is still AGE_CANDIDATE the CAS alone is enough;
+ * otherwise the action has to be unlinked from the aged list too.
+ */
+ if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+ AGE_FREE, false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
+ /*
+ * Take the lock even if the age has timed out,
+ * since the age action may still be in process.
+ */
+ rte_spinlock_lock(&age_info->aged_sl);
+ LIST_REMOVE(age, next);
+ rte_spinlock_unlock(&age_info->aged_sl);
+ __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ }
+}
+
+/**
+ * Release an ASO age action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ * Index of ASO age action to release.
+ *
+ * @return
+ * 0 when age action was removed, otherwise the number of references.
+ */
+static int
+flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
+ uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
+
+ /* Last reference gone: detach from aging and recycle the action. */
+ if (!ret) {
+ flow_dv_aso_age_remove_from_age(dev, age);
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ }
+ return ret;
+}
+
+/**
+ * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ void *old_pools = mng->pools;
+ uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
+ uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+ if (!pools) {
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ if (old_pools) {
+ /*
+ * Copy the existing pool pointers. Use the ASO age pool
+ * pointer type to match mem_size above (was mistakenly
+ * sizeof(struct mlx5_flow_counter_pool *), a copy-paste
+ * from the counter pools resize).
+ */
+ memcpy(pools, old_pools,
+ mng->n * sizeof(struct mlx5_aso_age_pool *));
+ mlx5_free(old_pools);
+ } else {
+ /* First ASO flow hit allocation - starting ASO data-path. */
+ int ret = mlx5_aso_queue_start(priv->sh);
+
+ if (ret) {
+ mlx5_free(pools);
+ return ret;
+ }
+ }
+ /* Publish the enlarged array only after it is fully populated. */
+ mng->n = resize;
+ mng->pools = pools;
+ return 0;
+}
+
+/**
+ * Create and initialize a new ASO aging pool.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] age_free
+ * Where to put the pointer of a new age action.
+ *
+ * @return
+ * The age actions pool pointer and @p age_free is set on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_age_pool *
+flow_dv_age_pool_create(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action **age_free)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_pool *pool = NULL;
+ struct mlx5_devx_obj *obj = NULL;
+ uint32_t i;
+
+ obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+ priv->sh->pdn);
+ if (!obj) {
+ rte_errno = ENODATA;
+ DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
+ return NULL;
+ }
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+ if (!pool) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ pool->flow_hit_aso_obj = obj;
+ pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
+ /* The resize lock protects pool indexing against concurrent growth. */
+ rte_spinlock_lock(&mng->resize_sl);
+ pool->index = mng->next;
+ /* Resize pools array if there is no room for the new pool in it. */
+ if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
+ /* Resize failed: undo the DevX object and pool allocation. */
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ mlx5_free(pool);
+ rte_spinlock_unlock(&mng->resize_sl);
+ return NULL;
+ }
+ mng->pools[pool->index] = pool;
+ mng->next++;
+ rte_spinlock_unlock(&mng->resize_sl);
+ /* Assign the first action in the new pool, the rest go to free list. */
+ *age_free = &pool->actions[0];
+ for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
+ pool->actions[i].offset = i;
+ LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
+ }
+ return pool;
+}
+
+/**
+ * Allocate a ASO aging bit.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Index to ASO age action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_aso_age_pool *pool;
+ struct mlx5_aso_age_action *age_free = NULL;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+
+ MLX5_ASSERT(mng);
+ /* Try to get the next free age action bit. */
+ rte_spinlock_lock(&mng->free_sl);
+ age_free = LIST_FIRST(&mng->free);
+ if (age_free) {
+ LIST_REMOVE(age_free, next);
+ } else if (!flow_dv_age_pool_create(dev, &age_free)) {
+ rte_spinlock_unlock(&mng->free_sl);
+ rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "failed to create ASO age pool");
+ return 0; /* 0 is an error. */
+ }
+ rte_spinlock_unlock(&mng->free_sl);
+ /*
+ * Derive the owning pool from the action pointer: the action's
+ * offset is its index inside pool->actions, so stepping back by
+ * it lands on the start of the actions array.
+ */
+ pool = container_of
+ ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
+ (age_free - age_free->offset), const struct mlx5_aso_age_pool,
+ actions);
+ /* Lazily create the DR action on first use of this slot. */
+ if (!age_free->dr_action) {
+ int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
+ error);
+
+ if (reg_c < 0) {
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "failed to get reg_c "
+ "for ASO flow hit");
+ return 0; /* 0 is an error. */
+ }
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+ age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
+ (priv->sh->rx_domain,
+ pool->flow_hit_aso_obj->obj, age_free->offset,
+ MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
+ (reg_c - REG_C_0));
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
+ if (!age_free->dr_action) {
+ rte_errno = errno;
+ /* Return the slot to the free list on failure. */
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age_free, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "failed to create ASO "
+ "flow hit action");
+ return 0; /* 0 is an error. */
+ }
+ }
+ __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ /* Encode pool index + (offset + 1) so a valid index is never 0. */
+ return pool->index | ((age_free->offset + 1) << 16);
+}
+
+/**
+ * Create a age action using ASO mechanism.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Index to the ASO age action on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
+ const struct rte_flow_action_age *age,
+ struct rte_flow_error *error)
+{
+ uint32_t age_idx = 0;
+ struct mlx5_aso_age_action *aso_age;
+
+ age_idx = flow_dv_aso_age_alloc(dev, error);
+ if (!age_idx)
+ return 0;
+ /* Fill the aging parameters from the user configuration. */
+ aso_age = flow_aso_age_get_by_idx(dev, age_idx);
+ aso_age->age_params.context = age->context;
+ aso_age->age_params.timeout = age->timeout;
+ aso_age->age_params.port_id = dev->data->port_id;
+ __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
+ __ATOMIC_RELAXED);
+ __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
+ __ATOMIC_RELAXED);
+ return age_idx;
+}
+
+/**
+ * Fill the flow with DV spec, lock free
+ * (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)