return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
REG_C_3;
case MLX5_MTR_COLOR:
+ case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
return priv->mtr_color_reg;
case MLX5_COPY_MARK:
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
+ enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
unsigned int i;
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No queues configured");
for (i = 0; i != rss->queue_num; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
if (rss->queue[i] >= priv->rxqs_n)
return rte_flow_error_set
(error, EINVAL,
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->queue[i], "queue is not configured");
+ rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
+ struct mlx5_rxq_ctrl, rxq);
+ if (i == 0)
+ rxq_type = rxq_ctrl->type;
+ if (rxq_type != rxq_ctrl->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i],
+ "combining hairpin and regular RSS queues is not supported");
}
return 0;
}
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
- if (raw_encap->size >
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4)))
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
split++;
action_n++;
break;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
- if (raw_encap->size >
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4))) {
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
raw_decap = actions->conf;
- if (raw_decap->size <
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4))) {
+ if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
if (flow->tunnel) {
struct mlx5_flow_tunnel *tunnel;
- rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- LIST_REMOVE(tunnel, chain);
- rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
mlx5_flow_tunnel_free(dev, tunnel);
}
static int
flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
- struct flow_grp_info grp_info, struct rte_flow_error *error)
+ const struct flow_grp_info *grp_info,
+ struct rte_flow_error *error)
{
- if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+ if (grp_info->transfer && grp_info->external &&
+ grp_info->fdb_def_rule) {
if (group == UINT32_MAX)
return rte_flow_error_set
(error, EINVAL,
mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
- struct flow_grp_info grp_info,
+ const struct flow_grp_info *grp_info,
struct rte_flow_error *error)
{
int ret;
bool standard_translation;
- if (!grp_info.skip_scale && grp_info.external &&
+ if (!grp_info->skip_scale && grp_info->external &&
group < MLX5_MAX_TABLES_EXTERNAL)
group *= MLX5_FLOW_TABLE_FACTOR;
if (is_tunnel_offload_active(dev)) {
- standard_translation = !grp_info.external ||
- grp_info.std_tbl_fix;
+ standard_translation = !grp_info->external ||
+ grp_info->std_tbl_fix;
} else {
standard_translation = true;
}
DRV_LOG(DEBUG,
- "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
- dev->data->port_id, group, grp_info.transfer,
- grp_info.external, grp_info.fdb_def_rule,
+ "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
+ dev->data->port_id, group, grp_info->transfer,
+ grp_info->external, grp_info->fdb_def_rule,
standard_translation ? "STANDARD" : "TUNNEL");
if (standard_translation)
ret = flow_group_to_table(dev->data->port_id, group, table,
};
};
+/*
+ * Forward declaration of the generic tunnel offload DB accessor.
+ * It scans the per-port tunnel list and dispatches match/hit/miss
+ * callbacks; see the definition below for the lock_op semantics.
+ */
+static bool
+mlx5_access_tunnel_offload_db
+ (struct rte_eth_dev *dev,
+ bool (*match)(struct rte_eth_dev *,
+ struct mlx5_flow_tunnel *, const void *),
+ void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+ void (*miss)(struct rte_eth_dev *, void *),
+ void *ctx, bool lock_op);
+
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
struct rte_flow *flow,
miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
miss_attr.group = jump_data->group;
ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
- &flow_table, grp_info, error);
+ &flow_table, &grp_info, error);
if (ret)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
struct mlx5_flow_tunnel *tunnel)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool;
DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
dev->data->port_id, tunnel->tunnel_id);
- RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
- tunnel->tunnel_id);
mlx5_hlist_destroy(tunnel->groups);
- mlx5_free(tunnel);
+ ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+ mlx5_ipool_free(ipool, tunnel->tunnel_id);
}
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+/*
+ * Generic accessor for the per-port tunnel offload DB.
+ * Walks thub->tunnels under thub->sl calling match() on each entry and
+ * stops at the first match.  On a match, hit() is invoked on that
+ * tunnel; otherwise miss() is invoked.  lock_op == true keeps the
+ * spinlock held across the hit()/miss() callback; lock_op == false
+ * drops the lock right after the scan, so the callback runs unlocked
+ * (callers on that path rely on the tunnel refcount to keep the
+ * object alive).  Returns true iff an entry matched.
+ */
+static bool
+mlx5_access_tunnel_offload_db
+ (struct rte_eth_dev *dev,
+ bool (*match)(struct rte_eth_dev *,
+ struct mlx5_flow_tunnel *, const void *),
+ void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+ void (*miss)(struct rte_eth_dev *, void *),
+ void *ctx, bool lock_op)
{
+ bool verdict = false;
struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
+ struct mlx5_flow_tunnel *tunnel;
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (tun->tunnel_id == id)
+ rte_spinlock_lock(&thub->sl);
+ LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+ verdict = match(dev, tunnel, (const void *)ctx);
+ if (verdict)
break;
}
+ /* Unlocked-callback mode: release before dispatching hit()/miss(). */
+ if (!lock_op)
+ rte_spinlock_unlock(&thub->sl);
+ if (verdict && hit)
+ hit(dev, tunnel, ctx);
+ if (!verdict && miss)
+ miss(dev, ctx);
+ if (lock_op)
+ rte_spinlock_unlock(&thub->sl);
+
+ return verdict;
+}
+
+/* Context for looking up a tunnel by numeric ID through the DB accessor. */
+struct tunnel_db_find_tunnel_id_ctx {
+ uint32_t tunnel_id;
+ struct mlx5_flow_tunnel *tunnel;
+};
+
+/* match callback: true when the entry's ID equals ctx->tunnel_id. */
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+ const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+/* hit callback: record the matched tunnel for the caller. */
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, void *x)
+{
+ struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+ RTE_SET_USED(dev);
+ ctx->tunnel = tunnel;
+}
+
+/*
+ * Find a tunnel object by ID.  Runs with lock_op == true, so the hit
+ * callback executes while the hub spinlock is still held.  Returns
+ * NULL when no tunnel with the given ID exists.
+ */
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+ struct tunnel_db_find_tunnel_id_ctx ctx = {
+ .tunnel_id = id,
+ };
- return tun;
+ mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+ find_tunnel_id_hit, NULL, &ctx, true);
+
+ return ctx.tunnel;
}
static struct mlx5_flow_tunnel *
const struct rte_flow_tunnel *app_tunnel)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool;
struct mlx5_flow_tunnel *tunnel;
uint32_t id;
- mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
- &id);
+ ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+ tunnel = mlx5_ipool_zmalloc(ipool, &id);
+ if (!tunnel)
+ return NULL;
if (id >= MLX5_MAX_TUNNELS) {
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+ mlx5_ipool_free(ipool, id);
DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
return NULL;
- } else if (!id) {
- return NULL;
- }
- /**
- * mlx5 flow tunnel is an auxlilary data structure
- * It's not part of IO. No need to allocate it from
- * huge pages pools dedicated for IO
- */
- tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
- 0, SOCKET_ID_ANY);
- if (!tunnel) {
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
- return NULL;
}
tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
mlx5_flow_tunnel_grp2tbl_create_cb,
NULL,
mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!tunnel->groups) {
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
- mlx5_free(tunnel);
+ mlx5_ipool_free(ipool, id);
return NULL;
}
tunnel->groups->ctx = priv->sh;
return tunnel;
}
+/* Context for get-or-create tunnel lookup through the DB accessor. */
+struct tunnel_db_get_tunnel_ctx {
+ const struct rte_flow_tunnel *app_tunnel;
+ struct mlx5_flow_tunnel *tunnel;
+};
+
+/* match callback: compare the whole application tunnel descriptor. */
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+ const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ /*
+ * NOTE(review): memcmp covers the full struct, padding included —
+ * assumes both sides are zero-initialized; verify against callers.
+ */
+ return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+ sizeof(*ctx->app_tunnel));
+}
+
+/* hit callback: take a reference on the existing tunnel. */
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, void *x)
+{
+ /* called under tunnel spinlock protection */
+ struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ tunnel->refctn++;
+ ctx->tunnel = tunnel;
+}
+
+/*
+ * miss callback: no matching tunnel exists — allocate a new one and
+ * insert it into the hub list with an initial reference of 1.
+ * On allocation failure ctx->tunnel stays NULL and the caller reports
+ * -ENOMEM.
+ */
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+ /* called under tunnel spinlock protection */
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+ /* Drop the hub lock around allocation; re-take it for list update. */
+ rte_spinlock_unlock(&thub->sl);
+ ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+ rte_spinlock_lock(&thub->sl);
+ if (ctx->tunnel) {
+ /*
+ * Set the initial refcount only after the NULL check:
+ * mlx5_flow_tunnel_allocate() returns NULL on failure and
+ * dereferencing it unconditionally would crash.
+ */
+ ctx->tunnel->refctn = 1;
+ LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+ }
+}
+
+
+/*
+ * Get-or-create a tunnel object for the application descriptor.
+ * Delegates to the DB accessor with lock_op == true: get_tunnel_hit()
+ * bumps the refcount of an existing entry, get_tunnel_miss() allocates
+ * and links a new one.  Returns 0 on success, -ENOMEM when allocation
+ * failed (ctx.tunnel left NULL).
+ */
static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
const struct rte_flow_tunnel *app_tunnel,
struct mlx5_flow_tunnel **tunnel)
{
- int ret;
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
-
- rte_spinlock_lock(&thub->sl);
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (!memcmp(app_tunnel, &tun->app_tunnel,
- sizeof(*app_tunnel))) {
- *tunnel = tun;
- ret = 0;
- break;
- }
- }
- if (!tun) {
- tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
- if (tun) {
- LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
- *tunnel = tun;
- } else {
- ret = -ENOMEM;
- }
- }
- rte_spinlock_unlock(&thub->sl);
- if (tun)
- __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+ struct tunnel_db_get_tunnel_ctx ctx = {
+ .app_tunnel = app_tunnel,
+ };
- return ret;
+ mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+ get_tunnel_miss, &ctx, true);
+ *tunnel = ctx.tunnel;
+ return ctx.tunnel ? 0 : -ENOMEM;
}
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
*num_of_items = 1;
return 0;
}
+
+/*
+ * Shared context for releasing a PMD tunnel item or action.
+ * Exactly one of items/actions is set; ret carries the outcome back
+ * to the caller.
+ */
+struct tunnel_db_element_release_ctx {
+ struct rte_flow_item *items;
+ struct rte_flow_action *actions;
+ uint32_t num_elements;
+ struct rte_flow_error *error;
+ int ret;
+};
+
+/*
+ * match callback: the entry owns the released item/action pointer.
+ * Only single-element release is supported (num_elements must be 1).
+ */
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+ const struct tunnel_db_element_release_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ if (ctx->num_elements != 1)
+ return false;
+ else if (ctx->items)
+ return ctx->items == &tunnel->item;
+ else if (ctx->actions)
+ return ctx->actions == &tunnel->action;
+
+ return false;
+}
+
+/*
+ * hit callback: drop one reference; free the tunnel when it hits zero.
+ * Runs unlocked (lock_op == false on the release paths).
+ * NOTE(review): unlike the old code there is no LIST_REMOVE here —
+ * presumably the unlink now happens inside mlx5_flow_tunnel_free();
+ * confirm, otherwise a freed tunnel stays linked in the hub list.
+ */
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, void *x)
+{
+ struct tunnel_db_element_release_ctx *ctx = x;
+ ctx->ret = 0;
+ if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+ mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+/* miss callback: the pointer did not come from this PMD — report EINVAL. */
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+ struct tunnel_db_element_release_ctx *ctx = x;
+ RTE_SET_USED(dev);
+ ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "invalid argument");
+}
+
+/*
+ * rte_flow tunnel API: release PMD-provided tunnel items.
+ * Delegates to the DB accessor with lock_op == false, so the
+ * hit/miss callbacks run outside the hub spinlock.
+ */
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
- struct rte_flow_item *pmd_items,
- uint32_t num_items, struct rte_flow_error *err)
-{
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
+ struct rte_flow_item *pmd_items,
+ uint32_t num_items, struct rte_flow_error *err)
+{
+ struct tunnel_db_element_release_ctx ctx = {
+ .items = pmd_items,
+ .actions = NULL,
+ .num_elements = num_items,
+ .error = err,
+ };
- rte_spinlock_lock(&thub->sl);
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (&tun->item == pmd_items) {
- LIST_REMOVE(tun, chain);
- break;
- }
- }
- rte_spinlock_unlock(&thub->sl);
- if (!tun || num_items != 1)
- return rte_flow_error_set(err, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "invalid argument");
- if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
- mlx5_flow_tunnel_free(dev, tun);
- return 0;
+ mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+ tunnel_element_release_hit,
+ tunnel_element_release_miss, &ctx, false);
+
+ return ctx.ret;
}
+/*
+ * rte_flow tunnel API: release PMD-provided tunnel actions.
+ * Mirrors mlx5_flow_tunnel_item_release() using the same release
+ * context/callbacks, again with lock_op == false.
+ */
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
- struct rte_flow_action *pmd_actions,
- uint32_t num_actions,
- struct rte_flow_error *err)
-{
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
+ struct rte_flow_action *pmd_actions,
+ uint32_t num_actions, struct rte_flow_error *err)
+{
+ struct tunnel_db_element_release_ctx ctx = {
+ .items = NULL,
+ .actions = pmd_actions,
+ .num_elements = num_actions,
+ .error = err,
+ };
- rte_spinlock_lock(&thub->sl);
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (&tun->action == pmd_actions) {
- LIST_REMOVE(tun, chain);
- break;
- }
- }
- rte_spinlock_unlock(&thub->sl);
- if (!tun || num_actions != 1)
- return rte_flow_error_set(err, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "invalid argument");
- if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
- mlx5_flow_tunnel_free(dev, tun);
+ mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+ tunnel_element_release_hit,
+ tunnel_element_release_miss, &ctx, false);
- return 0;
+ return ctx.ret;
}
static int
const struct mlx5_flow_tbl_data_entry *tble;
const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ if (!is_tunnel_offload_active(dev)) {
+ info->flags = 0;
+ return 0;
+ }
+
if ((ol_flags & mask) != mask)
goto err;
tble = tunnel_mark_decode(dev, m->hash.fdir.hi);