+/**
+ * Convert Sample action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] num_of_dest
+ * The number of destinations.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[in, out] mdest_res
+ * Pointer to destination array resource.
+ * @param[in] sample_actions
+ * Pointer to sample path actions list.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_create_action_sample(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ uint32_t num_of_dest,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct mlx5_flow_dv_dest_array_resource *mdest_res,
+ void **sample_actions,
+ uint64_t action_flags,
+ struct rte_flow_error *error)
+{
+ /* Update normal path action resource into the last index of the array. */
+ uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
+ struct mlx5_flow_sub_actions_list *sample_act =
+ &mdest_res->sample_act[dest_index];
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint32_t normal_idx = 0;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ MLX5_ASSERT(wks);
+ rss_desc = &wks->rss_desc;
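+ /*
+ * Multiple destinations (mirroring) are registered as a destination
+ * array resource; a single destination uses the plain sample resource.
+ */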
+ if (num_of_dest > 1) {
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ /* Handle QP action for mirroring. */
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create rx queue");
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
+ sample_act->dr_queue_action = hrxq->action;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ }
+ sample_act->actions_num = normal_idx;
+ /* Update sample action resource into the first index of the array. */
+ mdest_res->ft_type = res->ft_type;
+ memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
+ sizeof(struct mlx5_flow_sub_actions_idx));
+ memcpy(&mdest_res->sample_act[0], &res->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list));
+ mdest_res->num_of_dest = num_of_dest;
+ if (flow_dv_dest_array_resource_register(dev, mdest_res,
+ dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create sample "
+ "action");
+ } else {
+ res->sub_actions = sample_actions;
+ if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create sample action");
+ }
+ return 0;
+}
+
+/**
+ * Remove an ASO age action from the age actions list.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age
+ * Pointer to the aso age action handler.
+ */
+static void
+flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action *age)
+{
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param = &age->age_params;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint16_t expected = AGE_CANDIDATE;
+
+ age_info = GET_PORT_AGE_INFO(priv);
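+ /* Try to move the action from AGE_CANDIDATE straight to AGE_FREE. */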
+ if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+ AGE_FREE, false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
+ /*
+ * We need the lock even if the age timed out,
+ * since the age action may still be in process.
+ */
+ rte_spinlock_lock(&age_info->aged_sl);
+ LIST_REMOVE(age, next);
+ rte_spinlock_unlock(&age_info->aged_sl);
+ __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ }
+}
+
+/**
+ * Release an ASO age action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ * Index of ASO age action to release.
+ *
+ * @return
+ * 0 when age action was removed, otherwise the number of references.
+ */
+static int
+flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
+ uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
+
+ if (!ret) {
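+ /*
+ * Last reference released: detach from the aging list and
+ * return the action to the free list.
+ */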
+ flow_dv_aso_age_remove_from_age(dev, age);
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ }
+ return ret;
+}
+
+/**
+ * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ void *old_pools = mng->pools;
+ uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
+ uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+ if (!pools) {
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ if (old_pools) {
+ memcpy(pools, old_pools,
+ mng->n * sizeof(struct mlx5_aso_age_pool *));
+ mlx5_free(old_pools);
+ } else {
+ /* First ASO flow hit allocation - starting ASO data-path. */
+ int ret = mlx5_aso_queue_start(priv->sh);
+
+ if (ret) {
+ mlx5_free(pools);
+ return ret;
+ }
+ }
+ mng->n = resize;
+ mng->pools = pools;
+ return 0;
+}
+
+/**
+ * Create and initialize a new ASO aging pool.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] age_free
+ * Where to put the pointer of a new age action.
+ *
+ * @return
+ * The age actions pool pointer and @p age_free is set on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_age_pool *
+flow_dv_age_pool_create(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action **age_free)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_pool *pool = NULL;
+ struct mlx5_devx_obj *obj = NULL;
+ uint32_t i;
+
+ obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+ priv->sh->pdn);
+ if (!obj) {
+ rte_errno = ENODATA;
+ DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
+ return NULL;
+ }
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+ if (!pool) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ pool->flow_hit_aso_obj = obj;
+ pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
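+ /* The resize lock protects the pools array and the next pool index. */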
+ rte_spinlock_lock(&mng->resize_sl);
+ pool->index = mng->next;
+ /* Resize pools array if there is no room for the new pool in it. */
+ if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ mlx5_free(pool);
+ rte_spinlock_unlock(&mng->resize_sl);
+ return NULL;
+ }
+ mng->pools[pool->index] = pool;
+ mng->next++;
+ rte_spinlock_unlock(&mng->resize_sl);
+ /* Assign the first action in the new pool, the rest go to free list. */
+ *age_free = &pool->actions[0];
+ for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
+ pool->actions[i].offset = i;
+ LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
+ }
+ return pool;
+}
+
+/**
+ * Allocate an ASO aging bit.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * Index to ASO age action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_aso_age_pool *pool;
+ struct mlx5_aso_age_action *age_free = NULL;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+
+ MLX5_ASSERT(mng);
+ /* Try to get the next free age action bit. */
+ rte_spinlock_lock(&mng->free_sl);
+ age_free = LIST_FIRST(&mng->free);
+ if (age_free) {
+ LIST_REMOVE(age_free, next);
+ } else if (!flow_dv_age_pool_create(dev, &age_free)) {
+ rte_spinlock_unlock(&mng->free_sl);
+ return 0; /* 0 is an error. */
+ }
+ rte_spinlock_unlock(&mng->free_sl);
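+ /*
+ * Recover the owning pool: the action offset gives its position in
+ * the actions[] array, so the array starts at age_free - offset and
+ * container_of() walks back to the pool.
+ */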
+ pool = container_of
+ ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
+ (age_free - age_free->offset), const struct mlx5_aso_age_pool,
+ actions);
+ if (!age_free->dr_action) {
+ age_free->dr_action = mlx5_glue->dr_action_create_flow_hit
+ (pool->flow_hit_aso_obj->obj,
+ age_free->offset, REG_C_5);
+ if (!age_free->dr_action) {
+ rte_errno = errno;
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age_free, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ return 0; /* 0 is an error. */
+ }
+ }
+ __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
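+ /*
+ * Encode the pool index in the lower 16 bits and (offset + 1) in the
+ * upper bits, so a valid index is never 0 (0 means failure).
+ */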
+ return pool->index | ((age_free->offset + 1) << 16);
+}
+
+/**
+ * Create an age action using the ASO mechanism.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ *
+ * @return
+ * Index to ASO age action on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
+ const struct rte_flow_action_age *age)
+{
+ uint32_t age_idx = 0;
+ struct mlx5_aso_age_action *aso_age;
+
+ age_idx = flow_dv_aso_age_alloc(dev);
+ if (!age_idx)
+ return 0;
+ aso_age = flow_aso_age_get_by_idx(dev, age_idx);
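+ /* Fill the aging parameters; the action starts as AGE_CANDIDATE. */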
+ aso_age->age_params.context = age->context;
+ aso_age->age_params.timeout = age->timeout;
+ aso_age->age_params.port_id = dev->data->port_id;
+ __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
+ __ATOMIC_RELAXED);
+ __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
+ __ATOMIC_RELAXED);
+ return age_idx;
+}
+
+/**
+ * Fill the flow with DV spec, lock free
+ * (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint64_t item_flags = 0;
+ uint64_t last_item = 0;
+ uint64_t action_flags = 0;
+ uint64_t priority = attr->priority;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ },
+ };
+ int actions_n = 0;
+ bool actions_end = false;
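+ /*
+ * Reserve stack space for the modify header resource followed by up
+ * to MLX5_MAX_MODIFY_NUM + 1 modification commands.
+ */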
+ union {
+ struct mlx5_flow_dv_modify_hdr_resource res;
+ uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ sizeof(struct mlx5_modification_cmd) *
+ (MLX5_MAX_MODIFY_NUM + 1)];
+ } mhdr_dummy;
+ struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_age *age = NULL;
+ union flow_dv_attr flow_attr = { .attr = 0 };
+ uint32_t tag_be;
+ union mlx5_flow_tbl_key tbl_key;
+ uint32_t modify_action_position = UINT32_MAX;
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+ uint8_t next_protocol = 0xff;
+ struct rte_vlan_hdr vlan = { 0 };
+ struct mlx5_flow_dv_dest_array_resource mdest_res;
+ struct mlx5_flow_dv_sample_resource sample_res;
+ void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ struct mlx5_flow_sub_actions_list *sample_act;
+ uint32_t sample_act_pos = UINT32_MAX;
+ uint32_t num_of_dest = 0;
+ int tmp_actions_n = 0;
+ uint32_t table;
+ int ret = 0;
+ const struct mlx5_flow_tunnel *tunnel;
+ struct flow_grp_info grp_info = {
+ .external = !!dev_flow->external,
+ .transfer = !!attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ .skip_scale = !!dev_flow->skip_scale,
+ };
+
+ if (!wks)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to push flow workspace");
+ rss_desc = &wks->rss_desc;
+ memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
+ memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
+ mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ /* Update normal path action resource into the last index of the array. */
+ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
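+ /*
+ * Resolve the tunnel context: a tunnel match rule takes it from the
+ * items, a tunnel steer rule from the actions, otherwise the sub-flow
+ * inherits the tunnel from dev_flow.
+ */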
+ tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
+ flow_items_to_tunnel(items) :
+ is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
+ flow_actions_to_tunnel(actions) :
+ dev_flow->tunnel;
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, tunnel, attr, items, actions);
+ ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+ grp_info, error);
+ if (ret)
+ return ret;
+ dev_flow->dv.group = table;
+ if (attr->transfer)
+ mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = dev_conf->flow_prio - 1;
+ /* Number of actions must be set to 0 in case of dirty stack. */
+ mhdr_res->actions_num = 0;
+ if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ /*
+ * Do not add the decap action if the match rule drops packets:
+ * HW rejects rules with decap & drop.
+ */
+ bool add_decap = true;
+ const struct rte_flow_action *ptr = actions;
+ struct mlx5_flow_tbl_resource *tbl;
+
+ for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
+ if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ add_decap = false;
+ break;
+ }
+ }
+ if (add_decap) {
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ }
+ /*
+ * Bind table_id with <group, table> for the tunnel match rule.
+ * The tunnel set rule establishes that binding in the JUMP action
+ * handler. Required for the scenario when the application creates
+ * the tunnel match rule before the tunnel set rule.
+ */
+ tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
+ attr->transfer,
+ !!dev_flow->external, tunnel,
+ attr->group, 0, error);
+ if (!tbl)
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "cannot register tunnel group");
+ }
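+ /*
+ * Translate the action list; each case below appends DV actions and
+ * accumulates the detected flags in action_flags.
+ */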
+ for (; !actions_end ; actions++) {
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action *action = actions;
+ const uint8_t *rss_key;
+ const struct rte_flow_action_meter *mtr;
+ struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_aso_age_action *age_act;
+ uint32_t port_id = 0;
+ struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+ int action_type = actions->type;
+ const struct rte_flow_action *found_action = NULL;
+ struct mlx5_flow_meter *fm = NULL;
+ uint32_t jump_group = 0;
+
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ switch (action_type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ if (flow_dv_translate_action_port_id(dev, action,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ MLX5_ASSERT(!handle->rix_port_id_action);
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.port_id_action->action;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ num_of_dest++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ dev_flow->handle->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ struct rte_flow_action_mark mark = {
+ .id = MLX5_FLOW_MARK_DEFAULT,
+ };
+
+ if (flow_dv_convert_action_mark(dev, &mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ /*
+ * Only one FLAG or MARK is supported per device flow
+ * right now. So the pointer to the tag resource must be
+ * zero before the register process.
+ */
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ dev_flow->handle->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ if (flow_dv_convert_action_mark(dev, mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ /* Fall-through */
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+ /* Legacy (non-extensive) MARK action. */
+ tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (actions->conf))->id);
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
+ break;