static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
uint32_t port_id);
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
/**
* Initialize flow attributes structure according to flow items' types.
target_group =
((const struct rte_flow_action_jump *)action->conf)->group;
ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
- grp_info, error);
+ &grp_info, error);
if (ret)
return ret;
if (attributes->group == target_group &&
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_age *age = action->conf;
- if (!priv->config.devx || priv->sh->cmng.counter_fallback)
+ if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
+ !priv->sh->aso_age_mng))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
flow_dv_validate_attributes(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
const struct rte_flow_attr *attributes,
- struct flow_grp_info grp_info,
+ const struct flow_grp_info *grp_info,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
}
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, tunnel, attr, items, actions);
- ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
+ ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
if (ret < 0)
return ret;
is_root = (uint64_t)ret;
/* Meter action will add one more TAG action. */
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
+ if (!attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Shared ASO age action is not supported for group 0");
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ ++actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_AGE:
ret = flow_dv_validate_action_age(action_flags,
actions, dev,
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_ecpri *ecpri_m = item->mask;
const struct rte_flow_item_ecpri *ecpri_v = item->spec;
+ struct rte_ecpri_common_hdr common;
void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
misc_parameters_4);
void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
* Some wildcard rules only matching type field should be supported.
*/
if (ecpri_m->hdr.dummy[0]) {
- switch (ecpri_v->hdr.common.type) {
+ common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
+ switch (common.type) {
case RTE_ECPRI_MSG_TYPE_IQ_DATA:
case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
case RTE_ECPRI_MSG_TYPE_DLY_MSR:
"cannot get table");
return NULL;
}
+ DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+ table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
return &tbl_data->tbl;
}
if (he)
mlx5_hlist_unregister(tunnel_grp_hash, he);
DRV_LOG(DEBUG,
- "Table_id %#x tunnel %u group %u released.",
+ "Table_id %u tunnel %u group %u released.",
table_id,
tbl_data->tunnel ?
tbl_data->tunnel->tunnel_id : 0,
struct mlx5_flow_dv_matcher *ref,
union mlx5_flow_tbl_key *key,
struct mlx5_flow *dev_flow,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group_id,
struct rte_flow_error *error)
{
struct mlx5_cache_entry *entry;
.data = ref,
};
- tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
- key->domain, false, NULL, 0, 0, error);
+ /*
+ * The tunnel offload API requires this registration for the case
+ * when a tunnel match rule was inserted before the tunnel set rule.
+ */
+ tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+ key->direction, key->domain,
+ dev_flow->external, tunnel,
+ group_id, 0, error);
if (!tbl)
return -rte_errno; /* No need to refill the error info */
tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
return counter;
}
+
/**
* Add Tx queue matcher
*
rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc->hash_fields = dev_flow->hash_fields;
rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
- rss_desc->standalone = false;
+ rss_desc->shared_rss = 0;
*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
if (!*hrxq_idx)
return NULL;
uint64_t action_flags = 0;
MLX5_ASSERT(wks);
- rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+ rss_desc = &wks->rss_desc;
sample_act = &res->sample_act;
sample_idx = &res->sample_idx;
sample_action = (const struct rte_flow_action_sample *)action->conf;
uint32_t hrxq_idx;
MLX5_ASSERT(wks);
- rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+ rss_desc = &wks->rss_desc;
if (num_of_dest > 1) {
if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
/* Handle QP action for mirroring */
return 0;
}
+/**
+ * Remove an ASO age action from the age actions list.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age
+ * Pointer to the ASO age action handler.
+ */
+static void
+flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action *age)
+{
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param = &age->age_params;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint16_t expected = AGE_CANDIDATE;
+
+ age_info = GET_PORT_AGE_INFO(priv);
+ if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+ AGE_FREE, false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
+ /*
+ * Not a candidate anymore - the action timed out and sits
+ * on the aged list. Take the lock even then, since the
+ * aging handler may still be processing this entry.
+ */
+ rte_spinlock_lock(&age_info->aged_sl);
+ LIST_REMOVE(age, next);
+ rte_spinlock_unlock(&age_info->aged_sl);
+ __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ }
+}
+
+/**
+ * Release an ASO age action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ * Index of ASO age action to release.
+ *
+ * @return
+ * 0 when age action was removed, otherwise the number of references
+ * still held on the action.
+ */
+static int
+flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
+ uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
+
+ if (!ret) {
+ /* Last reference: unlink from the aged list and recycle. */
+ flow_dv_aso_age_remove_from_age(dev, age);
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ }
+ return ret;
+}
+
+/**
+ * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ void *old_pools = mng->pools;
+ uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
+ uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+ if (!pools) {
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ if (old_pools) {
+ /* Copy the existing pool pointers to the bigger array. */
+ memcpy(pools, old_pools,
+ mng->n * sizeof(struct mlx5_aso_age_pool *));
+ mlx5_free(old_pools);
+ } else {
+ /* First ASO flow hit allocation - starting ASO data-path. */
+ int ret = mlx5_aso_queue_start(priv->sh);
+
+ if (ret) {
+ mlx5_free(pools);
+ return ret;
+ }
+ }
+ mng->n = resize;
+ mng->pools = pools;
+ return 0;
+}
+
+/**
+ * Create and initialize a new ASO aging pool.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] age_free
+ * Where to put the pointer of a new age action.
+ *
+ * @return
+ * The age actions pool pointer and @p age_free is set on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_age_pool *
+flow_dv_age_pool_create(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action **age_free)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_pool *pool = NULL;
+ struct mlx5_devx_obj *obj = NULL;
+ uint32_t i;
+
+ obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+ priv->sh->pdn);
+ if (!obj) {
+ rte_errno = ENODATA;
+ DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
+ return NULL;
+ }
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+ if (!pool) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ pool->flow_hit_aso_obj = obj;
+ pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
+ rte_spinlock_lock(&mng->resize_sl);
+ pool->index = mng->next;
+ /* Resize pools array if there is no room for the new pool in it. */
+ if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
+ /* Rollback: the DevX object and the pool are both ours. */
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ mlx5_free(pool);
+ rte_spinlock_unlock(&mng->resize_sl);
+ return NULL;
+ }
+ mng->pools[pool->index] = pool;
+ mng->next++;
+ rte_spinlock_unlock(&mng->resize_sl);
+ /* Assign the first action in the new pool, the rest go to free list. */
+ *age_free = &pool->actions[0];
+ /* actions[0].offset stays 0 - the pool was zero-initialized. */
+ for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
+ pool->actions[i].offset = i;
+ LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
+ }
+ return pool;
+}
+
+/**
+ * Allocate an ASO age action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * Index to ASO age action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_aso_age_pool *pool;
+ struct mlx5_aso_age_action *age_free = NULL;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+
+ MLX5_ASSERT(mng);
+ /* Try to get the next free age action bit. */
+ rte_spinlock_lock(&mng->free_sl);
+ age_free = LIST_FIRST(&mng->free);
+ if (age_free) {
+ LIST_REMOVE(age_free, next);
+ } else if (!flow_dv_age_pool_create(dev, &age_free)) {
+ rte_spinlock_unlock(&mng->free_sl);
+ return 0; /* 0 is an error.*/
+ }
+ rte_spinlock_unlock(&mng->free_sl);
+ /* Derive the owning pool from the action's slot in actions[]. */
+ pool = container_of
+ ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
+ (age_free - age_free->offset), const struct mlx5_aso_age_pool,
+ actions);
+ if (!age_free->dr_action) {
+ /* Lazily create the flow-hit DR action on first use. */
+ age_free->dr_action = mlx5_glue->dr_action_create_flow_hit
+ (pool->flow_hit_aso_obj->obj,
+ age_free->offset, REG_C_5);
+ if (!age_free->dr_action) {
+ rte_errno = errno;
+ /* Put the action back on the free list on failure. */
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age_free, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ return 0; /* 0 is an error.*/
+ }
+ }
+ __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ /* Offset is stored 1-based so index 0 remains the error value. */
+ return pool->index | ((age_free->offset + 1) << 16);
+}
+
+/**
+ * Create an age action using the ASO mechanism.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ *
+ * @return
+ * Index to ASO age action on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
+ const struct rte_flow_action_age *age)
+{
+ uint32_t age_idx = 0;
+ struct mlx5_aso_age_action *aso_age;
+
+ age_idx = flow_dv_aso_age_alloc(dev);
+ if (!age_idx)
+ return 0;
+ aso_age = flow_aso_age_get_by_idx(dev, age_idx);
+ aso_age->age_params.context = age->context;
+ aso_age->age_params.timeout = age->timeout;
+ aso_age->age_params.port_id = dev->data->port_id;
+ /* Reset the hit timestamp and arm the action for aging. */
+ __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
+ __ATOMIC_RELAXED);
+ __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
+ __ATOMIC_RELAXED);
+ return age_idx;
+}
+
/**
* Fill the flow with DV spec, lock free
* (mutex should be acquired by caller).
.external = !!dev_flow->external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
+ .skip_scale = !!dev_flow->skip_scale,
};
- MLX5_ASSERT(wks);
- rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+ if (!wks)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to push flow workspace");
+ rss_desc = &wks->rss_desc;
memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, tunnel, attr, items, actions);
ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
- grp_info, error);
+ &grp_info, error);
if (ret)
return ret;
dev_flow->dv.group = table;
/*
* do not add decap action if match rule drops packet
* HW rejects rules with decap & drop
+ *
+ * If a tunnel match rule was inserted before the matching tunnel set
+ * rule, the flow table used in the match rule must be registered.
+ * The current implementation handles that in
+ * flow_dv_matcher_register() at the end of this function.
*/
bool add_decap = true;
const struct rte_flow_action *ptr = actions;
- struct mlx5_flow_tbl_resource *tbl;
for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
}
- /*
- * bind table_id with <group, table> for tunnel match rule.
- * Tunnel set rule establishes that bind in JUMP action handler.
- * Required for scenario when application creates tunnel match
- * rule before tunnel set rule.
- */
- tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
- attr->transfer,
- !!dev_flow->external, tunnel,
- attr->group, 0, error);
- if (!tbl)
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "cannot register tunnel group");
}
for (; !actions_end ; actions++) {
const struct rte_flow_action_queue *queue;
const uint8_t *rss_key;
const struct rte_flow_action_meter *mtr;
struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_aso_age_action *age_act;
uint32_t port_id = 0;
struct mlx5_flow_dv_port_id_action_resource port_id_resource;
int action_type = actions->type;
* when expanding items for RSS.
*/
action_flags |= MLX5_FLOW_ACTION_RSS;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ dev_flow->handle->fate_action = rss_desc->shared_rss ?
+ MLX5_FLOW_FATE_SHARED_RSS :
+ MLX5_FLOW_FATE_QUEUE;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
+ flow->age = (uint32_t)(uintptr_t)(action->conf);
+ age_act = flow_aso_age_get_by_idx(dev, flow->age);
+ __atomic_fetch_add(&age_act->refcnt, 1,
+ __ATOMIC_RELAXED);
+ dev_flow->dv.actions[actions_n++] = age_act->dr_action;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
+ if (priv->sh->flow_hit_aso_en && attr->group) {
+ flow->age = flow_dv_translate_create_aso_age
+ (dev, action->conf);
+ if (!flow->age)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create ASO age action");
+ dev_flow->dv.actions[actions_n++] =
+ (flow_aso_age_get_by_idx
+ (dev, flow->age))->dr_action;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ break;
+ }
+ /* Fall-through */
case RTE_FLOW_ACTION_TYPE_COUNT:
if (!dev_conf->devx) {
return rte_flow_error_set
jump_group = ((const struct rte_flow_action_jump *)
action->conf)->group;
grp_info.std_tbl_fix = 0;
+ grp_info.skip_scale = 0;
ret = mlx5_flow_group_to_table(dev, tunnel,
jump_group,
&table,
- grp_info, error);
+ &grp_info, error);
if (ret)
return ret;
tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
tbl_key.table_id = dev_flow->dv.group;
- if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+ if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+ tunnel, attr->group, error))
return -rte_errno;
return 0;
}
* Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
* and tunnel.
*
- * @param[in] action
- * Shred RSS action holding hash RX queue objects.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * Shared RSS action ID holding hash RX queue objects.
* @param[in] hash_fields
* Defines combination of packet fields to participate in RX hash.
* @param[in] tunnel
* Valid hash RX queue index, otherwise 0.
*/
static uint32_t
-__flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
+__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
const uint64_t hash_fields,
const int tunnel)
{
- const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+ const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
+ shared_rss->hrxq_tunnel;
switch (hash_fields & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
* If shared action configured for *flow* suitable hash RX queue will be
* retrieved from attached shared action.
*
- * @param[in] flow
- * Shred RSS action holding hash RX queue objects.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] dev_flow
* Pointer to the sub flow.
+ * @param[in] rss_desc
+ * Pointer to the RSS descriptor.
* @param[out] hrxq
* Pointer to retrieved hash RX queue object.
*
* Valid hash RX queue index, otherwise 0 and rte_errno is set.
*/
static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct mlx5_flow *dev_flow,
- struct mlx5_hrxq **hrxq)
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc,
+ struct mlx5_hrxq **hrxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
uint32_t hrxq_idx;
- if (flow->shared_rss) {
+ if (rss_desc->shared_rss) {
hrxq_idx = __flow_dv_action_rss_hrxq_lookup
- (flow->shared_rss, dev_flow->hash_fields,
+ (dev, rss_desc->shared_rss,
+ dev_flow->hash_fields,
!!(dev_flow->handle->layers &
MLX5_FLOW_LAYER_TUNNEL));
- if (hrxq_idx) {
+ if (hrxq_idx)
*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
- __atomic_fetch_add(&(*hrxq)->refcnt, 1,
- __ATOMIC_RELAXED);
- }
} else {
- struct mlx5_flow_rss_desc *rss_desc =
- &wks->rss_desc[!!wks->flow_nested_idx];
-
*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
&hrxq_idx);
}
int err;
int idx;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
MLX5_ASSERT(wks);
- for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
+ if (rss_desc->shared_rss) {
+ dh = wks->flows[wks->flow_idx - 1].handle;
+ MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
+ dh->rix_srss = rss_desc->shared_rss;
+ }
+ for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
dev_flow = &wks->flows[idx];
dv = &dev_flow->dv;
dh = dev_flow->handle;
dv->actions[n++] =
priv->drop_queue.hrxq->action;
}
- } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
- !dv_h->rix_sample && !dv_h->rix_dest_array) {
+ } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
+ !dv_h->rix_sample && !dv_h->rix_dest_array) ||
+ (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
- (dev, flow, dev_flow, &hrxq);
+ (dev, dev_flow, rss_desc, &hrxq);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"cannot get hash queue");
goto error;
}
- dh->rix_hrxq = hrxq_idx;
+ if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+ dh->rix_hrxq = hrxq_idx;
dv->actions[n++] = hrxq->action;
} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
if (!priv->sh->default_miss_action) {
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
+ if (rss_desc->shared_rss)
+ wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
&cache->entry);
}
+/**
+ * Release shared RSS action resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param srss
+ * Shared RSS action index.
+ */
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss;
+
+ shared_rss = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
+ /*
+ * Only drops one reference here; the action itself is destroyed
+ * elsewhere once all references are released.
+ * NOTE(review): shared_rss is not NULL-checked - assumes srss is
+ * always a valid index; confirm callers guarantee that.
+ */
+ __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+}
+
void
flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry)
flow_dv_port_id_action_resource_release(dev,
handle->rix_port_id_action);
break;
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ flow_dv_shared_rss_action_release(dev, handle->rix_srss);
+ break;
default:
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
break;
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct rte_flow_shared_action *shared;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
if (!flow)
return;
flow_dv_remove(dev, flow);
- shared = mlx5_flow_get_shared_rss(flow);
- if (shared)
- __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
if (flow->counter) {
flow_dv_counter_free(dev, flow->counter);
flow->counter = 0;
mlx5_flow_meter_detach(fm);
flow->meter = 0;
}
+ if (flow->age)
+ flow_dv_aso_age_release(dev, flow->age);
while (flow->dev_handles) {
uint32_t tmp_idx = flow->dev_handles;
*
* @param[in] dev
* Pointer to the Ethernet device structure.
+ * @param[in] action_idx
+ * Shared RSS action ipool index.
* @param[in, out] action
* Partially initialized shared RSS action.
* @param[out] error
*/
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+ uint32_t action_idx,
struct mlx5_shared_action_rss *action,
struct rte_flow_error *error)
{
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = action->origin.queue;
rss_desc.queue_num = action->origin.queue_num;
- rss_desc.standalone = true;
+ /* Set non-zero value to indicate a shared RSS. */
+ rss_desc.shared_rss = action_idx;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
* error only.
*
* @return
- * A valid shared action handle in case of success, NULL otherwise and
+ * A valid shared action ID in case of success, 0 otherwise and
* rte_errno is set.
*/
-static struct rte_flow_shared_action *
+static uint32_t
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
const struct rte_flow_shared_action_conf *conf,
const struct rte_flow_action_rss *rss,
struct rte_flow_error *error)
{
- struct rte_flow_shared_action *shared_action = NULL;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_action = NULL;
void *queue = NULL;
- struct mlx5_shared_action_rss *shared_rss;
struct rte_flow_action_rss *origin;
const uint8_t *rss_key;
uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
+ uint32_t idx;
RTE_SET_USED(conf);
queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
0, SOCKET_ID_ANY);
- shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
- SOCKET_ID_ANY);
+ shared_action = mlx5_ipool_zmalloc
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
if (!shared_action || !queue) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
goto error_rss_init;
}
- shared_rss = &shared_action->rss;
- shared_rss->queue = queue;
- origin = &shared_rss->origin;
+ if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
+ rte_flow_error_set(error, E2BIG,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "rss action number out of range");
+ goto error_rss_init;
+ }
+ shared_action->queue = queue;
+ origin = &shared_action->origin;
origin->func = rss->func;
origin->level = rss->level;
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
origin->types = !rss->types ? ETH_RSS_IP : rss->types;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
- origin->key = &shared_rss->key[0];
+ memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ origin->key = &shared_action->key[0];
origin->key_len = MLX5_RSS_HASH_KEY_LEN;
- memcpy(shared_rss->queue, rss->queue, queue_size);
- origin->queue = shared_rss->queue;
+ memcpy(shared_action->queue, rss->queue, queue_size);
+ origin->queue = shared_action->queue;
origin->queue_num = rss->queue_num;
- if (__flow_dv_action_rss_setup(dev, shared_rss, error))
+ if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
goto error_rss_init;
- shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
- return shared_action;
+ __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
+ rte_spinlock_lock(&priv->shared_act_sl);
+ ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ &priv->rss_shared_actions, idx, shared_action, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
+ return idx;
error_rss_init:
- mlx5_free(shared_action);
- mlx5_free(queue);
- return NULL;
+ if (shared_action)
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ idx);
+ if (queue)
+ mlx5_free(queue);
+ return 0;
}
/**
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] shared_rss
- * The shared RSS action object to be removed.
+ * @param[in] idx
+ * The shared RSS action object ID to be removed.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
* 0 on success, otherwise negative errno value.
*/
static int
-__flow_dv_action_rss_release(struct rte_eth_dev *dev,
- struct mlx5_shared_action_rss *shared_rss,
- struct rte_flow_error *error)
+__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
+ struct rte_flow_error *error)
{
- struct rte_flow_shared_action *shared_action = NULL;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
uint32_t old_refcnt = 1;
- int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ int remaining;
- if (remaining) {
+ if (!shared_rss)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid shared action");
+ remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ if (remaining)
return rte_flow_error_set(error, ETOOMANYREFS,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss hrxq has references");
- }
- shared_action = container_of(shared_rss,
- struct rte_flow_shared_action, rss);
- if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
- 0, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
+ 0, 0, __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED))
return rte_flow_error_set(error, ETOOMANYREFS,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss has references");
- }
rte_free(shared_rss->queue);
+ rte_spinlock_lock(&priv->shared_act_sl);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ &priv->rss_shared_actions, idx, shared_rss, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ idx);
return 0;
}
flow_dv_action_create(struct rte_eth_dev *dev,
const struct rte_flow_shared_action_conf *conf,
const struct rte_flow_action *action,
- struct rte_flow_error *error)
+ struct rte_flow_error *err)
{
- struct rte_flow_shared_action *shared_action = NULL;
- struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t idx = 0;
+ uint32_t ret = 0;
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- shared_action = __flow_dv_action_rss_create(dev, conf,
- action->conf,
- error);
+ ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
+ idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
+ MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_translate_create_aso_age(dev, action->conf);
+ idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
+ MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ if (ret) {
+ struct mlx5_aso_age_action *aso_age =
+ flow_aso_age_get_by_idx(dev, ret);
+
+ if (!aso_age->age_params.context)
+ aso_age->age_params.context =
+ (void *)(uintptr_t)idx;
+ }
break;
default:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "action type not supported");
break;
}
- if (shared_action) {
- __atomic_add_fetch(&shared_action->refcnt, 1,
- __ATOMIC_RELAXED);
- rte_spinlock_lock(&priv->shared_act_sl);
- LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
- rte_spinlock_unlock(&priv->shared_act_sl);
- }
- return shared_action;
+ return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
}
/**
struct rte_flow_shared_action *action,
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
int ret;
- switch (action->type) {
- case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
- ret = __flow_dv_action_rss_release(dev, &action->rss, error);
- break;
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_RSS:
+ return __flow_dv_action_rss_release(dev, idx, error);
+ case MLX5_SHARED_ACTION_TYPE_AGE:
+ ret = flow_dv_aso_age_release(dev, idx);
+ if (ret)
+ /*
+ * In this case, the last flow holding a reference
+ * will actually release the age action.
+ */
+ DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
+ " released with references %d.", idx, ret);
+ return 0;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"action type not supported");
}
- if (ret)
- return ret;
- rte_spinlock_lock(&priv->shared_act_sl);
- LIST_REMOVE(action, next);
- rte_spinlock_unlock(&priv->shared_act_sl);
- rte_free(action);
- return 0;
}
/**
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] shared_rss
- * The shared RSS action object to be updated.
+ * @param[in] idx
+ * The shared RSS action object ID to be updated.
* @param[in] action_conf
* RSS action specification used to modify *shared_rss*.
* @param[out] error
* @note: currently only support update of RSS queues.
*/
static int
-__flow_dv_action_rss_update(struct rte_eth_dev *dev,
- struct mlx5_shared_action_rss *shared_rss,
+__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
const struct rte_flow_action_rss *action_conf,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
size_t i;
int ret;
void *queue = NULL;
uint32_t rss_key_len;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+ if (!shared_rss)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid shared action to update");
queue = mlx5_malloc(MLX5_MEM_ZERO,
RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
0, SOCKET_ID_ANY);
for (tunnel = 0; tunnel < 2; tunnel++) {
hrxq_idx = __flow_dv_action_rss_hrxq_lookup
- (shared_rss, hash_fields, tunnel);
+ (dev, idx, hash_fields, tunnel);
MLX5_ASSERT(hrxq_idx);
ret = mlx5_hrxq_modify
(dev, hrxq_idx,
flow_dv_action_update(struct rte_eth_dev *dev,
struct rte_flow_shared_action *action,
const void *action_conf,
- struct rte_flow_error *error)
+ struct rte_flow_error *err)
{
- switch (action->type) {
- case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
- return __flow_dv_action_rss_update(dev, &action->rss,
- action_conf, error);
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_RSS:
+ return __flow_dv_action_rss_update(dev, idx, action_conf, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type update not supported");
+ }
+}
+
+/**
+ * Query a shared flow rule action.
+ * Currently only the ASO age action type can be queried; it reports whether
+ * the flow has aged out and, while it has not, the seconds elapsed since the
+ * last hit.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * Shared action handle; an integer encoding the action type in the high
+ * bits and the object index in the low bits (split at
+ * MLX5_SHARED_ACTION_TYPE_OFFSET), not a real pointer.
+ * @param[in, out] data
+ * Caller-provided storage; for age actions this must point to a
+ * struct rte_flow_query_age which is filled in here.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise (set via
+ * rte_flow_error_set).
+ */
+static int
+flow_dv_action_query(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action *action, void *data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_age_param *age_param;
+ struct rte_flow_query_age *resp;
+ /* Decode the type/index pair packed into the handle value. */
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_AGE:
+ age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
+ resp = data;
+ resp->aged = __atomic_load_n(&age_param->state,
+ __ATOMIC_RELAXED) == AGE_TMOUT ?
+ 1 : 0;
+ /* The last-hit timestamp is only meaningful before aging out. */
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit = __atomic_load_n
+ (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ return 0;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
- "action type not supported");
+ "action type query not supported");
}
}
+
/**
* Query a dv flow rule for its statistics via devx.
*
void *data, struct rte_flow_error *error)
{
struct rte_flow_query_age *resp = data;
+ struct mlx5_age_param *age_param;
- if (flow->counter) {
- struct mlx5_age_param *age_param =
- flow_dv_counter_idx_get_age(dev, flow->counter);
+ if (flow->age) {
+ struct mlx5_aso_age_action *act =
+ flow_aso_age_get_by_idx(dev, flow->age);
+
+ age_param = &act->age_params;
+ } else if (flow->counter) {
+ age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
if (!age_param || !age_param->timeout)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot read age data");
- resp->aged = __atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) ==
- AGE_TMOUT ? 1 : 0;
- resp->sec_since_last_hit_valid = !resp->aged;
- if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit =
- __atomic_load_n(&age_param->sec_since_last_hit,
- __ATOMIC_RELAXED);
- return 0;
- }
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "age data not available");
+ } else {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "age data not available");
+ }
+ resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ AGE_TMOUT ? 1 : 0;
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit = __atomic_load_n
+ (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ return 0;
}
/**
.match_mask = (void *)&mask,
};
void *actions[2] = { 0 };
- struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
+ struct mlx5_flow_tbl_resource *tbl = NULL;
struct mlx5_devx_obj *dcs = NULL;
void *matcher = NULL;
void *flow = NULL;
- int i, ret = -1;
+ int ret = -1;
tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
if (!tbl)
goto err;
- dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
- NULL, 0, 0, NULL);
- if (!dest_tbl)
- goto err;
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
if (!dcs)
goto err;
&actions[0]);
if (ret)
goto err;
- ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
- (dest_tbl->obj, &actions[1]);
- if (ret)
- goto err;
+ actions[1] = priv->drop_queue.hrxq->action;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
&matcher);
"support detection");
ret = 0;
}
- for (i = 0; i < 2; i++) {
- if (actions[i])
- claim_zero(mlx5_flow_os_destroy_flow_action
- (actions[i]));
- }
+ if (actions[0])
+ claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
if (matcher)
claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
if (tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
- if (dest_tbl)
- flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
if (dcs)
claim_zero(mlx5_devx_cmd_destroy(dcs));
return ret;
struct mlx5_age_info *age_info;
struct mlx5_age_param *age_param;
struct mlx5_flow_counter *counter;
+ struct mlx5_aso_age_action *act;
int nb_flows = 0;
if (nb_contexts && !context)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "Should assign at least one flow or"
- " context to get if nb_contexts != 0");
+ NULL, "empty context");
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
+ LIST_FOREACH(act, &age_info->aged_aso, next) {
+ nb_flows++;
+ if (nb_contexts) {
+ context[nb_flows - 1] =
+ act->age_params.context;
+ if (!(--nb_contexts))
+ break;
+ }
+ }
TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
nb_flows++;
if (nb_contexts) {
flow_dv_action_validate(struct rte_eth_dev *dev,
const struct rte_flow_shared_action_conf *conf,
const struct rte_flow_action *action,
- struct rte_flow_error *error)
+ struct rte_flow_error *err)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
RTE_SET_USED(conf);
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- return mlx5_validate_action_rss(dev, action, error);
+ return mlx5_validate_action_rss(dev, action, err);
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ if (!priv->sh->aso_age_mng)
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "shared age action not supported");
+ return flow_dv_validate_action_age(0, action, dev, err);
default:
- return rte_flow_error_set(error, ENOTSUP,
+ return rte_flow_error_set(err, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"action type not supported");
.action_create = flow_dv_action_create,
.action_destroy = flow_dv_action_destroy,
.action_update = flow_dv_action_update,
+ .action_query = flow_dv_action_query,
.sync_domain = flow_dv_sync_domain,
};