struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- if (sh->dv_refcnt > 1) {
+ if (sh->refcnt > 1) {
int ret;
ret = pthread_mutex_lock(&sh->dv_mutex);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- if (sh->dv_refcnt > 1) {
+ if (sh->refcnt > 1) {
int ret;
ret = pthread_mutex_unlock(&sh->dv_mutex);
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_jump(const struct rte_flow_action *action,
+flow_dv_validate_action_jump(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ const struct rte_flow_action *action,
uint64_t action_flags,
const struct rte_flow_attr *attributes,
bool external, struct rte_flow_error *error)
{
uint32_t target_group, table;
int ret = 0;
-
+ struct flow_grp_info grp_info = {
+ .external = !!external,
+ .transfer = !!attributes->transfer,
+ .fdb_def_rule = 1,
+ .std_tbl_fix = 0
+ };
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
target_group =
((const struct rte_flow_action_jump *)action->conf)->group;
- ret = mlx5_flow_group_to_table(attributes, external, target_group,
- true, &table, error);
+ ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
+ grp_info, error);
if (ret)
return ret;
- if (attributes->group == target_group)
+ if (attributes->group == target_group &&
+ !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
+ MLX5_FLOW_ACTION_TUNNEL_MATCH)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"target group must be other than"
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_age *age = action->conf;
- if (!priv->config.devx || priv->counter_fallback)
+ if (!priv->config.devx || priv->sh->cmng.counter_fallback)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
int offset;
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
MLX5_ASSERT(pool);
- if (priv->counter_fallback) {
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
+ if (priv->sh->cmng.counter_fallback)
+ return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
0, pkts, bytes, 0, NULL, NULL, 0);
- }
rte_spinlock_lock(&pool->sl);
if (!pool->raw) {
*pkts = 0;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool;
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
- uint32_t fallback = priv->counter_fallback;
+ bool fallback = priv->sh->cmng.counter_fallback;
uint32_t size = sizeof(*pool);
- size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
- size += (!fallback ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
- size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
+ size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
+ size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
}
pool->raw = NULL;
- pool->type = 0;
- pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE);
+ pool->is_aged = !!age;
pool->query_gen = 0;
pool->min_dcs = dcs;
rte_spinlock_init(&pool->sl);
if (base > cmng->max_id)
cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
cmng->last_pool_idx = pool->index;
- pool->type |= CNT_POOL_TYPE_EXT;
}
rte_spinlock_unlock(&cmng->pool_update_sl);
return pool;
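With the extension array gone, a counter pool is now a single contiguous allocation: the pool header, the counter array, and (for aged pools) the age array. A minimal sketch of the indexing this size computation implies; presumably this is what the MLX5_POOL_GET_CNT macro used below expands to:

/* Sketch only: counter i sits immediately after the pool header. */
static inline struct mlx5_flow_counter *
pool_cnt_by_idx(struct mlx5_flow_counter_pool *pool, uint32_t i)
{
	return (struct mlx5_flow_counter *)
	       ((uint8_t *)(pool + 1) + i * MLX5_CNT_SIZE);
}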
struct mlx5_flow_counter *cnt;
enum mlx5_counter_type cnt_type =
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
- uint32_t fallback = priv->counter_fallback;
+ bool fallback = priv->sh->cmng.counter_fallback;
uint32_t i;
if (fallback) {
i = dcs->id % MLX5_COUNTERS_PER_POOL;
cnt = MLX5_POOL_GET_CNT(pool, i);
cnt->pool = pool;
- MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
+ cnt->dcs_when_free = dcs;
*cnt_free = cnt;
return pool;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt_free = NULL;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
- uint32_t fallback = priv->counter_fallback;
+ bool fallback = priv->sh->cmng.counter_fallback;
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
enum mlx5_counter_type cnt_type =
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
goto err;
pool = cnt_free->pool;
if (fallback)
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
+ cnt_free->dcs_when_active = cnt_free->dcs_when_free;
/* Create a DV counter action only in the first time usage. */
if (!cnt_free->action) {
uint16_t offset;
dcs = pool->min_dcs;
} else {
offset = 0;
- dcs = cnt_ext->dcs;
+ dcs = cnt_free->dcs_when_free;
}
ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
&cnt_free->action);
err:
if (cnt_free) {
cnt_free->pool = pool;
+ if (fallback)
+ cnt_free->dcs_when_free = cnt_free->dcs_when_active;
rte_spinlock_lock(&cmng->csl[cnt_type]);
TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
rte_spinlock_unlock(&cmng->csl[cnt_type]);
if (IS_SHARED_CNT(counter) &&
mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
return;
- if (IS_AGE_POOL(pool))
+ if (pool->is_aged)
flow_dv_counter_remove_from_age(dev, counter, cnt);
cnt->pool = pool;
	/*
	 * Put the counter back to the list to be updated in non-fallback
	 * mode. Two lists are used alternately: while one is being queried,
	 * freed counters are added to the other one, selected by the pool
	 * query_gen value. After the query finishes, the counters are moved
	 * to the global container list. The query callback and this release
	 * function always operate on different lists.
	 */
- if (!priv->counter_fallback) {
+ if (!priv->sh->cmng.counter_fallback) {
rte_spinlock_lock(&pool->csl);
TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
rte_spinlock_unlock(&pool->csl);
} else {
- cnt_type = IS_AGE_POOL(pool) ? MLX5_COUNTER_TYPE_AGE :
- MLX5_COUNTER_TYPE_ORIGIN;
+ cnt->dcs_when_free = cnt->dcs_when_active;
+ cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
+ MLX5_COUNTER_TYPE_ORIGIN;
rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
cnt, next);
*/
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
const struct rte_flow_attr *attributes,
- bool external __rte_unused,
+ struct flow_grp_info grp_info,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
#ifndef HAVE_MLX5DV_DR
+ RTE_SET_USED(tunnel);
+ RTE_SET_USED(grp_info);
if (attributes->group)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
#else
uint32_t table = 0;
- ret = mlx5_flow_group_to_table(attributes, external,
- attributes->group, !!priv->fdb_def_rule,
- &table, error);
+ ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
+ grp_info, error);
if (ret)
return ret;
if (!table)
const struct rte_flow_item_vlan *vlan_m = NULL;
int16_t rw_act_num = 0;
uint64_t is_root;
+ const struct mlx5_flow_tunnel *tunnel;
+ struct flow_grp_info grp_info = {
+ .external = !!external,
+ .transfer = !!attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ };
if (items == NULL)
return -1;
- ret = flow_dv_validate_attributes(dev, attr, external, error);
+ if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ tunnel = flow_items_to_tunnel(items);
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
+ MLX5_FLOW_ACTION_DECAP;
+ } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
+ tunnel = flow_actions_to_tunnel(actions);
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ } else {
+ tunnel = NULL;
+ }
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, tunnel, attr, items, actions);
+ ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
if (ret < 0)
return ret;
is_root = (uint64_t)ret;
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (type) {
+ case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
+ if (items[0].type != (typeof(items[0].type))
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "MLX5 private items "
+ "must be the first");
+ break;
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
rw_act_num += MLX5_ACT_NUM_MDF_TTL;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- ret = flow_dv_validate_action_jump(actions,
+ ret = flow_dv_validate_action_jump(dev, tunnel, actions,
action_flags,
attr, external,
error);
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
++actions_n;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ if (actions[0].type != (typeof(actions[0].type))
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "MLX5 private action "
+ "must be the first");
+
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
"action not supported");
}
}
+ /*
+ * Validate actions in flow rules
+ * - Explicit decap action is prohibited by the tunnel offload API.
+ * - Drop action in tunnel steer rule is prohibited by the API.
+ * - Application cannot use MARK action because its value can mask
+ *   tunnel default miss notification.
+ * - JUMP in tunnel match rule has no support in current PMD
+ * implementation.
+ * - TAG & META are reserved for future uses.
+ */
+ if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
+ uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP |
+ MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_SET_TAG |
+ MLX5_FLOW_ACTION_SET_META |
+ MLX5_FLOW_ACTION_DROP;
+
+ if (action_flags & bad_actions_mask)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Invalid RTE action in tunnel "
+ "set decap rule");
+ if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "tunnel set decap rule must terminate "
+ "with JUMP");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "tunnel flows for ingress traffic only");
+ }
+ if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
+ uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP |
+ MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_SET_TAG |
+ MLX5_FLOW_ACTION_SET_META;
+
+ if (action_flags & bad_actions_mask)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Invalid RTE action in tunnel "
+ "set match rule");
+ }
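For reference, a sketch of the application side these checks constrain, assuming the experimental rte_flow tunnel offload API (rte_flow_tunnel_decap_set()) from the same release; the VXLAN type, port id, and jump group are placeholders:

#include <rte_flow.h>

static int
tunnel_steer_rule_sketch(uint16_t port_id)
{
	struct rte_flow_tunnel tunnel = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	};
	struct rte_flow_action *pmd_actions; /* PMD private actions */
	uint32_t pmd_actions_n;
	struct rte_flow_error err;

	if (rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
				      &pmd_actions_n, &err))
		return -1;
	/*
	 * The application appends its own actions after pmd_actions and
	 * passes the merged array to rte_flow_create() with an ingress
	 * attribute: no explicit DECAP/MARK/TAG/META/DROP, and the rule
	 * must terminate with JUMP to the tunnel match group.
	 */
	return 0;
}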
/*
* Validate the drop action mutual exclusion with other actions.
* Drop action is mutually-exclusive with any other action, except for
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
uint32_t table_id, uint8_t egress,
uint8_t transfer,
+ bool external,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group_id,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
return NULL;
}
tbl_data->idx = idx;
+ tbl_data->tunnel = tunnel;
+ tbl_data->group_id = group_id;
+ tbl_data->external = external;
tbl = &tbl_data->tbl;
pos = &tbl_data->entry;
if (transfer)
mlx5_flow_os_destroy_flow_tbl(tbl->obj);
tbl->obj = NULL;
+ if (is_tunnel_offload_active(dev) && tbl_data->external) {
+ struct mlx5_hlist_entry *he;
+ struct mlx5_hlist *tunnel_grp_hash;
+ struct mlx5_flow_tunnel_hub *thub =
+ mlx5_tunnel_hub(dev);
+ union tunnel_tbl_key tunnel_key = {
+ .tunnel_id = tbl_data->tunnel ?
+ tbl_data->tunnel->tunnel_id : 0,
+ .group = tbl_data->group_id
+ };
+ union mlx5_flow_tbl_key table_key = {
+ .v64 = pos->key
+ };
+ uint32_t table_id = table_key.table_id;
+
+ tunnel_grp_hash = tbl_data->tunnel ?
+ tbl_data->tunnel->groups :
+ thub->groups;
+ he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val);
+ if (he) {
+ struct tunnel_tbl_entry *tte;
+ tte = container_of(he, typeof(*tte), hash);
+ MLX5_ASSERT(tte->flow_table == table_id);
+ mlx5_hlist_remove(tunnel_grp_hash, he);
+ mlx5_free(tte);
+ }
+ mlx5_flow_id_release(mlx5_tunnel_hub(dev)->table_ids,
+ tunnel_flow_tbl_to_id(table_id));
+ DRV_LOG(DEBUG,
+ "port %u release table_id %#x tunnel %u group %u",
+ dev->data->port_id, table_id,
+ tbl_data->tunnel ?
+ tbl_data->tunnel->tunnel_id : 0,
+ tbl_data->group_id);
+ }
/* remove the entry from the hash list and free memory. */
mlx5_hlist_remove(sh->flow_tbls, pos);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
int ret;
tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
- key->domain, error);
+ key->domain, false, NULL, 0, error);
if (!tbl)
return -rte_errno; /* No need to refill the error info */
tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
struct mlx5_hrxq *hrxq;
MLX5_ASSERT(rss_desc->queue_num);
- *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
+ *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num);
+ rss_desc->queue, rss_desc->queue_num);
if (!*hrxq_idx) {
*hrxq_idx = mlx5_hrxq_new
- (dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
+ (dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ rss_desc->queue, rss_desc->queue_num,
+ !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL),
+ false);
if (!*hrxq_idx)
return NULL;
}
*cache_resource = *resource;
/* Create normal path table level */
tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
- attr->egress, attr->transfer, error);
+ attr->egress, attr->transfer,
+ dev_flow->external, NULL, 0, error);
if (!tbl) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
int tmp_actions_n = 0;
uint32_t table;
int ret = 0;
+ const struct mlx5_flow_tunnel *tunnel;
+ struct flow_grp_info grp_info = {
+ .external = !!dev_flow->external,
+ .transfer = !!attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ };
memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
/* update normal path action resource into last index of array */
sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
- ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
- !!priv->fdb_def_rule, &table, error);
+ tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
+ flow_items_to_tunnel(items) :
+ is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
+ flow_actions_to_tunnel(actions) :
+		 dev_flow->tunnel;
+ mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, tunnel, attr, items, actions);
+ ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+ grp_info, error);
if (ret)
return ret;
dev_flow->dv.group = table;
priority = dev_conf->flow_prio - 1;
/* number of actions must be set to 0 in case of dirty stack. */
mhdr_res->actions_num = 0;
+ if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ /*
+		 * Do not add the decap action if the match rule drops the
+		 * packet: HW rejects rules that combine decap & drop.
+ */
+ bool add_decap = true;
+ const struct rte_flow_action *ptr = actions;
+ struct mlx5_flow_tbl_resource *tbl;
+
+ for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
+ if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ add_decap = false;
+ break;
+ }
+ }
+ if (add_decap) {
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ }
+ /*
+		 * Bind table_id with <group, table> for the tunnel match rule.
+		 * A tunnel set rule establishes that binding in its JUMP action
+		 * handler. Required when the application creates the tunnel
+		 * match rule before the tunnel set rule.
+ */
+ tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
+ attr->transfer,
+ !!dev_flow->external, tunnel,
+ attr->group, error);
+ if (!tbl)
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "cannot register tunnel group");
+ }
for (; !actions_end ; actions++) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
actions,
"action not supported");
switch (action_type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ break;
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_group = ((const struct rte_flow_action_jump *)
action->conf)->group;
- if (dev_flow->external && jump_group <
- MLX5_MAX_TABLES_EXTERNAL)
- jump_group *= MLX5_FLOW_TABLE_FACTOR;
- ret = mlx5_flow_group_to_table(attr, dev_flow->external,
+ grp_info.std_tbl_fix = 0;
+ ret = mlx5_flow_group_to_table(dev, tunnel,
jump_group,
- !!priv->fdb_def_rule,
- &table, error);
+ &table,
+ grp_info, error);
if (ret)
return ret;
- tbl = flow_dv_tbl_resource_get(dev, table,
- attr->egress,
- attr->transfer, error);
+ tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
+ attr->transfer,
+ !!dev_flow->external,
+ tunnel, jump_group,
+ error);
if (!tbl)
return rte_flow_error_set
(error, errno,
return 0;
}
+/**
+ * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
+ *
+ * @param[in, out] action
+ *   Shared RSS action holding hash RX queue objects.
+ * @param[in] hash_fields
+ * Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ *   Nonzero when the hash RX queues serve tunneled traffic.
+ * @param[in] hrxq_idx
+ * Hash RX queue index to set.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
+ const uint64_t hash_fields,
+ const int tunnel,
+ uint32_t hrxq_idx)
+{
+	uint32_t *hrxqs = tunnel ? action->hrxq_tunnel : action->hrxq;
+
+ switch (hash_fields & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ hrxqs[0] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV4_TCP:
+ hrxqs[1] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV4_UDP:
+ hrxqs[2] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6:
+ hrxqs[3] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_TCP:
+ hrxqs[4] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_UDP:
+ hrxqs[5] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_NONE:
+ hrxqs[6] = hrxq_idx;
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/**
+ * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
+ *
+ * @param[in] action
+ *   Shared RSS action holding hash RX queue objects.
+ * @param[in] hash_fields
+ * Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ *   Nonzero when the hash RX queues serve tunneled traffic.
+ *
+ * @return
+ * Valid hash RX queue index, otherwise 0.
+ */
+static uint32_t
+__flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
+ const uint64_t hash_fields,
+ const int tunnel)
+{
+	const uint32_t *hrxqs = tunnel ? action->hrxq_tunnel : action->hrxq;
+
+ switch (hash_fields & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ return hrxqs[0];
+ case MLX5_RSS_HASH_IPV4_TCP:
+ return hrxqs[1];
+ case MLX5_RSS_HASH_IPV4_UDP:
+ return hrxqs[2];
+ case MLX5_RSS_HASH_IPV6:
+ return hrxqs[3];
+ case MLX5_RSS_HASH_IPV6_TCP:
+ return hrxqs[4];
+ case MLX5_RSS_HASH_IPV6_UDP:
+ return hrxqs[5];
+ case MLX5_RSS_HASH_NONE:
+ return hrxqs[6];
+ default:
+ return 0;
+ }
+}
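Both helpers cover exactly the seven hash_fields combinations that the setup and update loops below walk via mlx5_rss_hash_fields; presumably that table (defined in mlx5_flow.c, with MLX5_RSS_HASH_FIELDS_LEN as its size) reads:

const uint64_t mlx5_rss_hash_fields[] = {
	MLX5_RSS_HASH_IPV4,
	MLX5_RSS_HASH_IPV4_TCP,
	MLX5_RSS_HASH_IPV4_UDP,
	MLX5_RSS_HASH_IPV6,
	MLX5_RSS_HASH_IPV6_TCP,
	MLX5_RSS_HASH_IPV6_UDP,
	MLX5_RSS_HASH_NONE,
};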
+
+/**
+ * Retrieves the hash RX queue suitable for the *flow*.
+ * If a shared action is attached to the *flow*, the suitable hash RX
+ * queue is looked up in that shared action; otherwise it is taken from,
+ * or created for, the per-flow RSS descriptor.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] flow
+ *   Pointer to the flow.
+ * @param[in] dev_flow
+ *   Pointer to the sub flow.
+ * @param[out] hrxq
+ *   Pointer to retrieved hash RX queue object.
+ *
+ * @return
+ * Valid hash RX queue index, otherwise 0 and rte_errno is set.
+ */
+static uint32_t
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct mlx5_flow *dev_flow,
+ struct mlx5_hrxq **hrxq)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t hrxq_idx;
+
+ if (flow->shared_rss) {
+ hrxq_idx = __flow_dv_action_rss_hrxq_lookup
+ (flow->shared_rss, dev_flow->hash_fields,
+ !!(dev_flow->handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (hrxq_idx) {
+ *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
+ rte_atomic32_inc(&(*hrxq)->refcnt);
+ }
+ } else {
+ struct mlx5_flow_rss_desc *rss_desc =
+ &((struct mlx5_flow_rss_desc *)priv->rss_desc)
+ [!!priv->flow_nested_idx];
+
+ MLX5_ASSERT(rss_desc->queue_num);
+ hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue, rss_desc->queue_num);
+ if (!hrxq_idx) {
+ hrxq_idx = mlx5_hrxq_new(dev,
+ rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num,
+ !!(dev_flow->handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL),
+ false);
+ }
+ *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
+ }
+ return hrxq_idx;
+}
+
/**
* Apply the flow to the NIC, lock free,
* (mutex should be acquired by caller).
}
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
!dv_h->rix_sample && !dv_h->rix_dest_array) {
- struct mlx5_hrxq *hrxq;
- uint32_t hrxq_idx;
- struct mlx5_flow_rss_desc *rss_desc =
- &((struct mlx5_flow_rss_desc *)priv->rss_desc)
- [!!priv->flow_nested_idx];
+ struct mlx5_hrxq *hrxq = NULL;
+ uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
+ (dev, flow, dev_flow, &hrxq);
- MLX5_ASSERT(rss_desc->queue_num);
- hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num);
- if (!hrxq_idx) {
- hrxq_idx = mlx5_hrxq_new
- (dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
- }
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- hrxq_idx);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct rte_flow_shared_action *shared;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
if (!flow)
return;
__flow_dv_remove(dev, flow);
+ shared = mlx5_flow_get_shared_rss(flow);
+ if (shared)
+ __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
if (flow->counter) {
flow_dv_counter_release(dev, flow->counter);
flow->counter = 0;
}
}
+/**
+ * Release array of hash RX queue objects.
+ * Helper function.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] hrxqs
+ * Array of hash RX queue objects.
+ *
+ * @return
+ * Total number of references to hash RX queue objects in *hrxqs* array
+ * after this operation.
+ */
+static int
+__flow_dv_hrxqs_release(struct rte_eth_dev *dev,
+ uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
+{
+ size_t i;
+ int remaining = 0;
+
+ for (i = 0; i < RTE_DIM(*hrxqs); i++) {
+ int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
+
+ if (!ret)
+ (*hrxqs)[i] = 0;
+ remaining += ret;
+ }
+ return remaining;
+}
+
+/**
+ * Release all hash RX queue objects representing shared RSS action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] action
+ * Shared RSS action to remove hash RX queue objects from.
+ *
+ * @return
+ * Total number of references to hash RX queue objects stored in *action*
+ * after this operation.
+ * Expected to be 0 if no external references held.
+ */
+static int
+__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *action)
+{
+ return __flow_dv_hrxqs_release(dev, &action->hrxq) +
+ __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
+}
+
+/**
+ * Setup shared RSS action.
+ * Prepare set of hash RX queue objects sufficient to handle all valid
+ * hash_fields combinations (see enum ibv_rx_hash_fields).
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] action
+ * Partially initialized shared RSS action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *action,
+ struct rte_flow_error *error)
+{
+ size_t i;
+ int err;
+
+ for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
+ uint32_t hrxq_idx;
+ uint64_t hash_fields = mlx5_rss_hash_fields[i];
+ int tunnel;
+
+ for (tunnel = 0; tunnel < 2; tunnel++) {
+ hrxq_idx = mlx5_hrxq_new(dev, action->origin.key,
+ MLX5_RSS_HASH_KEY_LEN,
+ hash_fields,
+ action->origin.queue,
+ action->origin.queue_num,
+ tunnel, true);
+ if (!hrxq_idx) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error_hrxq_new;
+ }
+ err = __flow_dv_action_rss_hrxq_set
+ (action, hash_fields, tunnel, hrxq_idx);
+ MLX5_ASSERT(!err);
+ }
+ }
+ return 0;
+error_hrxq_new:
+ err = rte_errno;
+ __flow_dv_action_rss_hrxqs_release(dev, action);
+ rte_errno = err;
+ return -rte_errno;
+}
+
+/**
+ * Create shared RSS action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] rss
+ * RSS action specification used to create shared action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * A valid shared action handle in case of success, NULL otherwise and
+ * rte_errno is set.
+ */
+static struct rte_flow_shared_action *
+__flow_dv_action_rss_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_shared_action *shared_action = NULL;
+ void *queue = NULL;
+ struct mlx5_shared_action_rss *shared_rss;
+ struct rte_flow_action_rss *origin;
+ const uint8_t *rss_key;
+ uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
+
+ RTE_SET_USED(conf);
+ queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
+ 0, SOCKET_ID_ANY);
+ shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
+ SOCKET_ID_ANY);
+ if (!shared_action || !queue) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ goto error_rss_init;
+ }
+ shared_rss = &shared_action->rss;
+ shared_rss->queue = queue;
+ origin = &shared_rss->origin;
+ origin->func = rss->func;
+ origin->level = rss->level;
+ /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
+ origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ origin->key = &shared_rss->key[0];
+ origin->key_len = MLX5_RSS_HASH_KEY_LEN;
+ memcpy(shared_rss->queue, rss->queue, queue_size);
+ origin->queue = shared_rss->queue;
+ origin->queue_num = rss->queue_num;
+ if (__flow_dv_action_rss_setup(dev, shared_rss, error))
+ goto error_rss_init;
+ shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
+ return shared_action;
+error_rss_init:
+ mlx5_free(shared_action);
+ mlx5_free(queue);
+ return NULL;
+}
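For context, a minimal application-side sketch of what this function backs, assuming the experimental rte_flow_shared_action_create() entry point from the same release; port and queue numbers are placeholders, and as the code above shows, the PMD copies both the key and the queue array:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow_shared_action *
shared_rss_create_sketch(uint16_t port_id)
{
	uint16_t queues[] = { 0, 1 };
	struct rte_flow_action_rss rss_conf = {
		.types = ETH_RSS_IP,	/* 0 would also select ETH_RSS_IP */
		.key = NULL,		/* NULL selects the default key */
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	struct rte_flow_action rss_action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss_conf,
	};
	struct rte_flow_shared_action_conf conf = { .ingress = 1 };
	struct rte_flow_error err;

	/* Rules reference the returned handle through an action of type
	 * RTE_FLOW_ACTION_TYPE_SHARED whose conf is the handle itself. */
	return rte_flow_shared_action_create(port_id, &conf, &rss_action,
					     &err);
}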
+
+/**
+ * Destroy the shared RSS action.
+ * Release related hash RX queue objects.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] shared_rss
+ * The shared RSS action object to be removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_rss_release(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *shared_rss,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_shared_action *shared_action = NULL;
+ uint32_t old_refcnt = 1;
+ int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+
+ if (remaining) {
+ return rte_flow_error_set(error, ETOOMANYREFS,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss hrxq has references");
+ }
+ shared_action = container_of(shared_rss,
+ struct rte_flow_shared_action, rss);
+ if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
+ 0, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ return rte_flow_error_set(error, ETOOMANYREFS,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss has references");
+ }
+	mlx5_free(shared_rss->queue);
+ return 0;
+}
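The compare-and-swap above retires the action only when the caller holds the very last reference; a stripped-down sketch of that gate, using the same GCC atomic builtins:

#include <stdbool.h>
#include <stdint.h>

/* Drop an object only if its refcnt is exactly 1, atomically. */
static bool
try_release_last_ref(uint32_t *refcnt)
{
	uint32_t expected = 1;

	/* On failure *refcnt is left intact: someone else holds a ref. */
	return __atomic_compare_exchange_n(refcnt, &expected, 0,
					   false /* strong */,
					   __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}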
+
+/**
+ * Create shared action, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * Action specification used to create shared action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * A valid shared action handle in case of success, NULL otherwise and
+ * rte_errno is set.
+ */
+static struct rte_flow_shared_action *
+__flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_shared_action *shared_action = NULL;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ shared_action = __flow_dv_action_rss_create(dev, conf,
+ action->conf,
+ error);
+ break;
+ default:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "action type not supported");
+ break;
+ }
+ if (shared_action) {
+ __atomic_add_fetch(&shared_action->refcnt, 1,
+ __ATOMIC_RELAXED);
+ LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
+ }
+ return shared_action;
+}
+
+/**
+ * Destroy the shared action.
+ * Release action related resources on the NIC and the memory.
+ * Lock free, (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The shared action object to be removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ switch (action->type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+ ret = __flow_dv_action_rss_release(dev, &action->rss, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
+ if (ret)
+ return ret;
+ LIST_REMOVE(action, next);
+	mlx5_free(action);
+ return 0;
+}
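The matching application-side call would be rte_flow_shared_action_destroy(); a sketch, reusing the handle and port id from the creation sketch above:

static int
shared_rss_destroy_sketch(uint16_t port_id,
			  struct rte_flow_shared_action *handle)
{
	struct rte_flow_error err;

	/* Refused with ETOOMANYREFS while flows still use the handle. */
	return rte_flow_shared_action_destroy(port_id, handle, &err);
}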
+
+/**
+ * Updates the shared RSS action configuration in place.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] shared_rss
+ * The shared RSS action object to be updated.
+ * @param[in] action_conf
+ * RSS action specification used to modify *shared_rss*.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ * @note: currently only the RSS queue set can be updated.
+ */
+static int
+__flow_dv_action_rss_update(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *shared_rss,
+ const struct rte_flow_action_rss *action_conf,
+ struct rte_flow_error *error)
+{
+ size_t i;
+ int ret;
+ void *queue = NULL;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
+ uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+
+ queue = mlx5_malloc(MLX5_MEM_ZERO,
+ RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
+ 0, SOCKET_ID_ANY);
+ if (!queue)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ if (action_conf->key) {
+ rss_key = action_conf->key;
+ rss_key_len = action_conf->key_len;
+ } else {
+ rss_key = rss_hash_default_key;
+ rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ }
+ for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
+ uint32_t hrxq_idx;
+ uint64_t hash_fields = mlx5_rss_hash_fields[i];
+ int tunnel;
+
+ for (tunnel = 0; tunnel < 2; tunnel++) {
+ hrxq_idx = __flow_dv_action_rss_hrxq_lookup
+ (shared_rss, hash_fields, tunnel);
+ MLX5_ASSERT(hrxq_idx);
+ ret = mlx5_hrxq_modify
+ (dev, hrxq_idx,
+ rss_key, rss_key_len,
+ hash_fields,
+ action_conf->queue, action_conf->queue_num);
+ if (ret) {
+ mlx5_free(queue);
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot update hash queue");
+ }
+ }
+ }
+ mlx5_free(shared_rss->queue);
+ shared_rss->queue = queue;
+ memcpy(shared_rss->queue, action_conf->queue, queue_size);
+ shared_rss->origin.queue = shared_rss->queue;
+ shared_rss->origin.queue_num = action_conf->queue_num;
+ return 0;
+}
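On the application side this corresponds to rte_flow_shared_action_update(); a sketch with placeholder queues, again reusing the handle from the creation sketch:

static int
shared_rss_update_sketch(uint16_t port_id,
			 struct rte_flow_shared_action *handle)
{
	uint16_t new_queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss new_rss = {
		.queue = new_queues,
		.queue_num = RTE_DIM(new_queues),
	};
	struct rte_flow_action update = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &new_rss,
	};
	struct rte_flow_error err;

	/* Per the note above, only the RSS queue set is updated;
	 * the key and hash types keep their previous values. */
	return rte_flow_shared_action_update(port_id, handle, &update,
					     &err);
}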
+
+/**
+ * Updates the shared action configuration in place, lock free,
+ * (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The shared action object to be updated.
+ * @param[in] action_conf
+ * Action specification used to modify *action*.
+ *   *action_conf* must match the type of *action*; otherwise it is
+ *   considered invalid.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ const void *action_conf,
+ struct rte_flow_error *error)
+{
+ switch (action->type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+ return __flow_dv_action_rss_update(dev, &action->rss,
+ action_conf, error);
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
+}
/**
* Query a dv flow rule for its statistics via devx.
*
dtb = &mtb->ingress;
/* Create the meter table with METER level. */
dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ &error);
if (!dtb->tbl) {
DRV_LOG(ERR, "Failed to create meter policer table.");
return -1;
/* Create the meter suffix table with SUFFIX level. */
dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
MLX5_FLOW_TABLE_LEVEL_SUFFIX,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ &error);
if (!dtb->sfx_tbl) {
DRV_LOG(ERR, "Failed to create meter suffix table.");
return -1;
void *flow = NULL;
int i, ret = -1;
- tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, NULL);
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, NULL);
if (!tbl)
goto err;
- dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, NULL);
+ dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, NULL);
if (!dest_tbl)
goto err;
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
flow_dv_shared_unlock(dev);
}
+/**
+ * Validate shared action.
+ * Dispatcher for action type specific validation.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * The shared action object to validate.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+flow_dv_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ RTE_SET_USED(conf);
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ return mlx5_validate_action_rss(dev, action, error);
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
+}
+
+/*
+ * Mutex-protected thunk to lock-free __flow_dv_action_create().
+ */
+static struct rte_flow_shared_action *
+flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_shared_action *shared_action = NULL;
+
+ flow_dv_shared_lock(dev);
+ shared_action = __flow_dv_action_create(dev, conf, action, error);
+ flow_dv_shared_unlock(dev);
+ return shared_action;
+}
+
+/*
+ * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
+ */
+static int
+flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ flow_dv_shared_lock(dev);
+ ret = __flow_dv_action_destroy(dev, action, error);
+ flow_dv_shared_unlock(dev);
+ return ret;
+}
+
+/*
+ * Mutex-protected thunk to lock-free __flow_dv_action_update().
+ */
+static int
+flow_dv_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ const void *action_conf,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ flow_dv_shared_lock(dev);
+	ret = __flow_dv_action_update(dev, action, action_conf, error);
+ flow_dv_shared_unlock(dev);
+ return ret;
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.validate = flow_dv_validate,
.prepare = flow_dv_prepare,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
.get_aged_flows = flow_get_aged_flows,
+ .action_validate = flow_dv_action_validate,
+ .action_create = flow_dv_action_create,
+ .action_destroy = flow_dv_action_destroy,
+ .action_update = flow_dv_action_update,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */