}
/**
- * Destroy table hash list and all the root entries per domain.
+ * Destroy table hash list.
*
* @param[in] priv
* Pointer to the private device data structure.
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_flow_tbl_data_entry *tbl_data;
- union mlx5_flow_tbl_key table_key = {
- {
- .table_id = 0,
- .reserved = 0,
- .domain = 0,
- .direction = 0,
- }
- };
- struct mlx5_hlist_entry *pos;
if (!sh->flow_tbls)
return;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- MLX5_ASSERT(tbl_data);
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_free(tbl_data);
- }
- table_key.direction = 1;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- MLX5_ASSERT(tbl_data);
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_free(tbl_data);
- }
- table_key.direction = 0;
- table_key.domain = 1;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- MLX5_ASSERT(tbl_data);
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_free(tbl_data);
- }
mlx5_hlist_destroy(sh->flow_tbls);
}
* Zero on success, positive error code otherwise.
*/
int
-mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
+mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
+ int err = 0;
+ /* Tables are only used in DV and DR modes. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_dev_ctx_shared *sh = priv->sh;
char s[MLX5_HLIST_NAMESIZE];
- int err = 0;
MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
- 0, 0, NULL, NULL, NULL);
+ 0, 0, flow_dv_tbl_create_cb, NULL,
+ flow_dv_tbl_remove_cb);
if (!sh->flow_tbls) {
DRV_LOG(ERR, "flow tables with hash creation failed.");
err = ENOMEM;
return err;
}
+ sh->flow_tbls->ctx = sh;
#ifndef HAVE_MLX5DV_DR
+ struct rte_flow_error error;
+ struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
+
/*
 * If DR support is not available, the zero tables must still be created,
 * because DV expects to see them even though they cannot be created by
 * RDMA-CORE.
 */
- union mlx5_flow_tbl_key table_key = {
- {
- .table_id = 0,
- .reserved = 0,
- .domain = 0,
- .direction = 0,
- }
- };
- struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*tbl_data), 0,
- SOCKET_ID_ANY);
-
- if (!tbl_data) {
- err = ENOMEM;
- goto error;
- }
- tbl_data->entry.key = table_key.v64;
- err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
- if (err)
- goto error;
- __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
- table_key.direction = 1;
- tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
- SOCKET_ID_ANY);
- if (!tbl_data) {
+ if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, NULL, 0, 1, &error) ||
+ !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, NULL, 0, 1, &error) ||
+ !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, NULL, 0, 1, &error)) {
err = ENOMEM;
goto error;
}
- tbl_data->entry.key = table_key.v64;
- err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
- if (err)
- goto error;
- __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
- table_key.direction = 0;
- table_key.domain = 1;
- tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
- SOCKET_ID_ANY);
- if (!tbl_data) {
- err = ENOMEM;
- goto error;
- }
- tbl_data->entry.key = table_key.v64;
- err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
- if (err)
- goto error;
- __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
return err;
error:
mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
+#endif
return err;
}
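
With the table cache now driven by create/remove callbacks, the non-DR zero tables are obtained through flow_dv_tbl_resource_get() with the dummy flag instead of being hand-built and inserted here. The sketch below illustrates the register pattern this relies on: lookup by key, create callback on a miss, one reference taken either way. It is a simplified, single-bucket, single-threaded illustration, not the actual mlx5_hlist implementation.

#include <stddef.h>
#include <stdint.h>

struct entry {
	struct entry *next;
	uint64_t key;
	uint32_t refcnt;
};

struct hlist {
	struct entry *head;                 /* one bucket for brevity */
	void *ctx;                          /* e.g. the shared device context */
	struct entry *(*create_cb)(struct hlist *, uint64_t, void *);
	void (*remove_cb)(struct hlist *, struct entry *);
};

/* Return the entry matching "key", creating it through the callback when
 * it is not cached yet; every successful call takes one reference. */
static struct entry *
hlist_register(struct hlist *h, uint64_t key, void *cb_ctx)
{
	struct entry *e;

	for (e = h->head; e != NULL; e = e->next)
		if (e->key == key)
			break;
	if (e == NULL) {
		e = h->create_cb(h, key, cb_ctx);
		if (e == NULL)
			return NULL;        /* creation failed */
		e->key = key;
		e->refcnt = 0;
		e->next = h->head;
		h->head = e;
	}
	e->refcnt++;
	return e;
}

In the patch, flow_dv_tbl_create_cb() plays the part of create_cb, with struct mlx5_flow_cb_ctx carrying dev, error and the tunnel parameters through cb_ctx.
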
return match_criteria_enable;
}
-
-/**
- * Get a flow table.
- *
- * @param[in, out] dev
- * Pointer to rte_eth_dev structure.
- * @param[in] table_id
- * Table id to use.
- * @param[in] egress
- * Direction of the table.
- * @param[in] transfer
- * E-Switch or NIC flow.
- * @param[out] error
- * pointer to error structure.
- *
- * @return
- * Returns tables resource based on the index, NULL in case of failed.
- */
-static struct mlx5_flow_tbl_resource *
-flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
- uint32_t table_id, uint8_t egress,
- uint8_t transfer,
- bool external,
- const struct mlx5_flow_tunnel *tunnel,
- uint32_t group_id,
- struct rte_flow_error *error)
+struct mlx5_hlist_entry *
+flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_flow_tbl_resource *tbl;
- union mlx5_flow_tbl_key table_key = {
- {
- .table_id = table_id,
- .reserved = 0,
- .domain = !!transfer,
- .direction = !!egress,
- }
- };
- struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
- table_key.v64, NULL);
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
+ struct rte_flow_error *error = ctx->error;
+ union mlx5_flow_tbl_key key = { .v64 = key64 };
+ struct mlx5_flow_tbl_resource *tbl;
+ void *domain;
uint32_t idx = 0;
int ret;
- void *domain;
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- tbl = &tbl_data->tbl;
- __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
- return tbl;
- }
tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
if (!tbl_data) {
rte_flow_error_set(error, ENOMEM,
return NULL;
}
tbl_data->idx = idx;
- tbl_data->tunnel = tunnel;
- tbl_data->group_id = group_id;
- tbl_data->external = external;
+ tbl_data->tunnel = tt_prm->tunnel;
+ tbl_data->group_id = tt_prm->group_id;
+ tbl_data->external = tt_prm->external;
tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
tbl = &tbl_data->tbl;
- pos = &tbl_data->entry;
- if (transfer)
+ if (key.dummy)
+ return &tbl_data->entry;
+ if (key.domain)
domain = sh->fdb_domain;
- else if (egress)
+ else if (key.direction)
domain = sh->tx_domain;
else
domain = sh->rx_domain;
- ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
+ ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
if (ret) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
return NULL;
}
- /*
- * No multi-threads now, but still better to initialize the reference
- * count before insert it into the hash list.
- */
- __atomic_store_n(&tbl->refcnt, 0, __ATOMIC_RELAXED);
-
- if (table_id) {
+ if (key.table_id) {
ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
(tbl->obj, &tbl_data->jump.action);
if (ret) {
return NULL;
}
}
- pos->key = table_key.v64;
- ret = !mlx5_hlist_insert(sh->flow_tbls, pos);
- if (ret < 0) {
- rte_flow_error_set(error, -ret,
+ return &tbl_data->entry;
+}
+
+/**
+ * Get a flow table.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] table_id
+ * Table id to use.
+ * @param[in] egress
+ * Direction of the table.
+ * @param[in] transfer
+ * E-Switch or NIC flow.
+ * @param[in] external
+ * Whether the table is requested by a flow created externally to the PMD.
+ * @param[in] tunnel
+ * Tunnel offload descriptor the table belongs to, NULL if none.
+ * @param[in] group_id
+ * Original flow group id of the table.
+ * @param[in] dummy
+ * If set, register the table entry without creating the rdma-core table
+ * object (used for the zero tables when DR is not available).
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Returns the table resource, NULL in case of failure.
+ */
+struct mlx5_flow_tbl_resource *
+flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
+ uint32_t table_id, uint8_t egress,
+ uint8_t transfer,
+ bool external,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group_id, uint8_t dummy,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ union mlx5_flow_tbl_key table_key = {
+ {
+ .table_id = table_id,
+ .dummy = dummy,
+ .domain = !!transfer,
+ .direction = !!egress,
+ }
+ };
+ struct mlx5_flow_tbl_tunnel_prm tt_prm = {
+ .tunnel = tunnel,
+ .group_id = group_id,
+ .external = external,
+ };
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ .data = &tt_prm,
+ };
+ struct mlx5_hlist_entry *entry;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+
+ entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
+ if (!entry) {
+ rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot insert flow table data entry");
- mlx5_flow_os_destroy_flow_tbl(tbl->obj);
- mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ "cannot get table");
+ return NULL;
}
- __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
- return tbl;
+ tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+ return &tbl_data->tbl;
+}
+
+void
+flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+
+ MLX5_ASSERT(entry && sh);
+ if (tbl_data->jump.action)
+ mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
+ if (tbl_data->tbl.obj)
+ mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
+ if (tbl_data->tunnel_offload && tbl_data->external) {
+ struct mlx5_hlist_entry *he;
+ struct mlx5_hlist *tunnel_grp_hash;
+ struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
+ union tunnel_tbl_key tunnel_key = {
+ .tunnel_id = tbl_data->tunnel ?
+ tbl_data->tunnel->tunnel_id : 0,
+ .group = tbl_data->group_id
+ };
+ union mlx5_flow_tbl_key table_key = {
+ .v64 = entry->key
+ };
+ uint32_t table_id = table_key.table_id;
+
+ tunnel_grp_hash = tbl_data->tunnel ?
+ tbl_data->tunnel->groups :
+ thub->groups;
+ he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
+ if (he) {
+ struct tunnel_tbl_entry *tte;
+ tte = container_of(he, typeof(*tte), hash);
+ MLX5_ASSERT(tte->flow_table == table_id);
+ mlx5_hlist_remove(tunnel_grp_hash, he);
+ mlx5_free(tte);
+ }
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ tunnel_flow_tbl_to_id(table_id));
+ DRV_LOG(DEBUG,
+ "Table_id %#x tunnel %u group %u released.",
+ table_id,
+ tbl_data->tunnel ?
+ tbl_data->tunnel->tunnel_id : 0,
+ tbl_data->group_id);
+ }
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
/**
if (!tbl)
return 0;
- if (__atomic_sub_fetch(&tbl->refcnt, 1, __ATOMIC_RELAXED) == 0) {
- struct mlx5_hlist_entry *pos = &tbl_data->entry;
-
- mlx5_flow_os_destroy_flow_tbl(tbl->obj);
- tbl->obj = NULL;
- if (tbl_data->tunnel_offload && tbl_data->external) {
- struct mlx5_hlist_entry *he;
- struct mlx5_hlist *tunnel_grp_hash;
- struct mlx5_flow_tunnel_hub *thub =
- mlx5_tunnel_hub(dev);
- union tunnel_tbl_key tunnel_key = {
- .tunnel_id = tbl_data->tunnel ?
- tbl_data->tunnel->tunnel_id : 0,
- .group = tbl_data->group_id
- };
- union mlx5_flow_tbl_key table_key = {
- .v64 = pos->key
- };
- uint32_t table_id = table_key.table_id;
-
- tunnel_grp_hash = tbl_data->tunnel ?
- tbl_data->tunnel->groups :
- thub->groups;
- he = mlx5_hlist_lookup(tunnel_grp_hash,
- tunnel_key.val, NULL);
- if (he) {
- struct tunnel_tbl_entry *tte;
- tte = container_of(he, typeof(*tte), hash);
- MLX5_ASSERT(tte->flow_table == table_id);
- mlx5_hlist_remove(tunnel_grp_hash, he);
- mlx5_free(tte);
- }
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
- tunnel_flow_tbl_to_id(table_id));
- DRV_LOG(DEBUG,
- "port %u release table_id %#x tunnel %u group %u",
- dev->data->port_id, table_id,
- tbl_data->tunnel ?
- tbl_data->tunnel->tunnel_id : 0,
- tbl_data->group_id);
- }
- /* remove the entry from the hash list and free memory. */
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
- tbl_data->idx);
- return 0;
- }
- return 1;
+ return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
}
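
The release side is now symmetric: flow_dv_tbl_resource_release() only drops its reference through mlx5_hlist_unregister(), and the list invokes flow_dv_tbl_remove_cb() once the last reference is gone. Continuing the sketch above (same caveat: illustrative only), and assuming the list keeps the former return convention of 0 when the entry is destroyed and 1 when references remain, which is what lets the release function return the result directly:

/* Drop one reference; on the last one unlink the entry and hand it to the
 * remove callback, which frees whatever the create callback allocated.
 * Returns 0 when the entry was destroyed, 1 when references remain. */
static int
hlist_unregister(struct hlist *h, struct entry *e)
{
	struct entry **pp;

	if (--e->refcnt > 0)
		return 1;
	for (pp = &h->head; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == e) {
			*pp = e->next;
			break;
		}
	}
	h->remove_cb(h, e);
	return 0;
}
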
/**
int ret;
tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
- key->domain, false, NULL, 0, error);
+ key->domain, false, NULL, 0, 0, error);
if (!tbl)
return -rte_errno; /* No need to refill the error info */
tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
/* Create normal path table level */
tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
attr->egress, attr->transfer,
- dev_flow->external, NULL, 0, error);
+ dev_flow->external, NULL, 0, 0, error);
if (!tbl) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
attr->transfer,
!!dev_flow->external, tunnel,
- attr->group, error);
+ attr->group, 0, error);
if (!tbl)
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
attr->transfer,
!!dev_flow->external,
- tunnel, jump_group,
+ tunnel, jump_group, 0,
error);
if (!tbl)
return rte_flow_error_set
/* Create the meter table with METER level. */
dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
egress, transfer, false, NULL, 0,
- &error);
+ 0, &error);
if (!dtb->tbl) {
DRV_LOG(ERR, "Failed to create meter policer table.");
return -1;
dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
MLX5_FLOW_TABLE_LEVEL_SUFFIX,
egress, transfer, false, NULL, 0,
- &error);
+ 0, &error);
if (!dtb->sfx_tbl) {
DRV_LOG(ERR, "Failed to create meter suffix table.");
return -1;
void *flow = NULL;
int i, ret = -1;
- tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, NULL);
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
if (!tbl)
goto err;
- dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, NULL);
+ dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
+ NULL, 0, 0, NULL);
if (!dest_tbl)
goto err;
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);