snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
MLX5_HLIST_WRITE_MOST,
- flow_dv_tag_create_cb, NULL,
+ flow_dv_tag_create_cb,
+ flow_dv_tag_match_cb,
flow_dv_tag_remove_cb);
if (!sh->tag_table) {
DRV_LOG(ERR, "tags with hash creation failed.");
MLX5_FLOW_MREG_HTABLE_SZ,
0, 0,
flow_dv_mreg_create_cb,
- NULL,
+ flow_dv_mreg_match_cb,
flow_dv_mreg_remove_cb);
if (!priv->mreg_cp_tbl) {
err = ENOMEM;
MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
- 0, 0, flow_dv_tbl_create_cb, NULL,
+ 0, 0, flow_dv_tbl_create_cb,
+ flow_dv_tbl_match_cb,
flow_dv_tbl_remove_cb);
if (!sh->flow_tbls) {
DRV_LOG(ERR, "flow tables with hash creation failed.");
flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
uint32_t flow_idx);
+int
+flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx __rte_unused)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res =
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+
+ return mcp_res->mark_id != key;
+}
+
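Note: the new match callbacks above all follow the same contract. To make it concrete, here is a minimal standalone sketch (not driver code; toy_entry, toy_match_cb and toy_lookup are hypothetical names) showing why returning 0 on a match and non-zero otherwise lets the hash list compare entries without storing the 64-bit key in each one:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_CONTAINER_OF(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_node {
	struct toy_node *next;
};

struct toy_entry {
	struct toy_node node;
	uint32_t id; /* Field compared by the match callback. */
};

/* Returns 0 on match, mirroring flow_dv_mreg_match_cb() above. */
static int
toy_match_cb(struct toy_node *node, uint64_t key)
{
	struct toy_entry *e = TOY_CONTAINER_OF(node, struct toy_entry, node);

	return e->id != (uint32_t)key;
}

static struct toy_entry *
toy_lookup(struct toy_node *head, uint64_t key)
{
	struct toy_node *n;

	for (n = head; n != NULL; n = n->next)
		if (!toy_match_cb(n, key)) /* 0 means "matches". */
			return TOY_CONTAINER_OF(n, struct toy_entry, node);
	return NULL;
}

int
main(void)
{
	struct toy_entry a = { .node = { .next = NULL }, .id = 7 };
	struct toy_entry b = { .node = { .next = &a.node }, .id = 3 };

	printf("found id=%u\n", toy_lookup(&b.node, 7)->id);
	return 0;
}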
struct mlx5_hlist_entry *
flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
void *cb_ctx)
return NULL;
}
mcp_res->idx = idx;
+ mcp_res->mark_id = mark_id;
/*
 * The copy Flows are not included in any list. They
 * are referenced from other Flows and cannot
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
- union mlx5_flow_tbl_key sfx_table_key;
#endif
size_t act_size;
size_t item_size;
sample_res->normal_path_tbl;
sfx_tbl_data = container_of(sfx_tbl,
struct mlx5_flow_tbl_data_entry, tbl);
- sfx_table_key.v64 = sfx_tbl_data->entry.key;
sfx_attr.group = sfx_attr.transfer ?
- (sfx_table_key.table_id - 1) :
- sfx_table_key.table_id;
+ (sfx_tbl_data->table_id - 1) :
+ sfx_tbl_data->table_id;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
flow_split_info->prefix_mark = dev_flow->handle->mark;
mlx5_free(tte);
}
+static int
+mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx __rte_unused)
+{
+ union tunnel_tbl_key tbl = {
+ .val = key,
+ };
+ struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+ return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
+}
+
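The tunnel group match callback above unpacks the 64-bit key back into its two 32-bit halves before comparing. A self-contained sketch of that packing scheme, with toy_tbl_key as a hypothetical stand-in for union tunnel_tbl_key:

#include <stdint.h>
#include <stdio.h>

/* Two 32-bit values folded into the single 64-bit hash-list key. */
union toy_tbl_key {
	uint64_t val;
	struct {
		uint32_t tunnel_id;
		uint32_t group;
	};
};

int
main(void)
{
	union toy_tbl_key k = { .tunnel_id = 5, .group = 9 };

	/* The 64-bit value is what gets hashed and handed to the
	 * match callback; the callback unpacks it the same way. */
	union toy_tbl_key back = { .val = k.val };

	printf("tunnel_id=%u group=%u\n", back.tunnel_id, back.group);
	return 0;
}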
static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
- uint64_t key __rte_unused,
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
void *ctx __rte_unused)
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
struct tunnel_tbl_entry *tte;
+ union tunnel_tbl_key tbl = {
+ .val = key,
+ };
tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
sizeof(*tte), 0,
goto err;
}
tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+ tte->tunnel_id = tbl.tunnel_id;
+ tte->group = tbl.group;
return &tte->hash;
err:
if (tte)
}
tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
mlx5_flow_tunnel_grp2tbl_create_cb,
- NULL,
+ mlx5_flow_tunnel_grp2tbl_match_cb,
mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!tunnel->groups) {
mlx5_ipool_free(ipool, id);
rte_spinlock_init(&thub->sl);
thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
0, mlx5_flow_tunnel_grp2tbl_create_cb,
- NULL,
+ mlx5_flow_tunnel_grp2tbl_match_cb,
mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!thub->groups) {
err = -rte_errno;
/**< Tag action object. */
uint32_t refcnt; /**< Reference counter. */
uint32_t idx; /**< Index for the index memory pool. */
+ uint32_t tag_id; /**< Tag ID. */
};
/*
/* List entry for device flows. */
uint32_t idx;
uint32_t rix_flow; /* Built flow for copy. */
+ uint32_t mark_id;
};
/* Table tunnel parameter. */
/**< tunnel offload */
const struct mlx5_flow_tunnel *tunnel;
uint32_t group_id;
- bool external;
- bool tunnel_offload; /* Tunnel offlod table or not. */
- bool is_egress; /**< Egress table. */
+ uint32_t external:1;
+ uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
+ uint32_t is_egress:1; /**< Egress table. */
+ uint32_t is_transfer:1; /**< Transfer table. */
+ uint32_t dummy:1; /**< Dummy table. */
+ uint32_t reserve:27; /**< Reserved for future use. */
+ uint32_t table_id; /**< Table ID. */
};
/* Sub rdma-core actions list. */
struct tunnel_tbl_entry {
struct mlx5_hlist_entry hash;
uint32_t flow_table;
+ uint32_t tunnel_id;
+ uint32_t group;
};
static inline uint32_t
/* Hash list callbacks for flow tables: */
struct mlx5_hlist_entry *flow_dv_tbl_create_cb(struct mlx5_hlist *list,
uint64_t key, void *entry_ctx);
+int flow_dv_tbl_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx);
void flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);
struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
struct mlx5_hlist_entry *flow_dv_tag_create_cb(struct mlx5_hlist *list,
uint64_t key, void *cb_ctx);
+int flow_dv_tag_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx);
void flow_dv_tag_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);
struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list,
uint64_t key, void *ctx);
+int flow_dv_mreg_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx);
void flow_dv_mreg_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);
cache_resource = container_of(entry,
struct mlx5_flow_dv_encap_decap_resource,
entry);
- if (resource->entry.key == cache_resource->entry.key &&
- resource->reformat_type == cache_resource->reformat_type &&
+ if (resource->reformat_type == cache_resource->reformat_type &&
resource->ft_type == cache_resource->ft_type &&
resource->flags == cache_resource->flags &&
resource->size == cache_resource->size &&
.error = error,
.data = resource,
};
+ uint64_t key64;
resource->flags = dev_flow->dv.group ? 0 : 1;
- resource->entry.key = __rte_raw_cksum(&encap_decap_key.v32,
- sizeof(encap_decap_key.v32), 0);
+ key64 = __rte_raw_cksum(&encap_decap_key.v32,
+ sizeof(encap_decap_key.v32), 0);
if (resource->reformat_type !=
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
resource->size)
- resource->entry.key = __rte_raw_cksum(resource->buf,
- resource->size,
- resource->entry.key);
- entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
- &ctx);
+ key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
+ entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
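The encap/decap and modify-header paths now compute the 64-bit lookup key into a local key64 instead of writing it into resource->entry.key. A self-contained sketch of that pattern, using a simple FNV-1a hash as a stand-in for __rte_raw_cksum() (toy_hash and toy_reformat are hypothetical):

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Any cheap hash over the raw resource bytes works; the resulting
 * key64 only lives for the duration of the register call. */
static uint64_t
toy_hash(const void *buf, size_t len, uint64_t seed)
{
	const uint8_t *p = buf;
	uint64_t h = seed ? seed : 0xcbf29ce484222325ULL;
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= p[i];
		h *= 0x100000001b3ULL;
	}
	return h;
}

struct toy_reformat {
	uint32_t ft_type;
	uint32_t size;
	uint8_t buf[32];
};

int
main(void)
{
	struct toy_reformat r = { .ft_type = 1, .size = 4,
				  .buf = { 0xde, 0xad, 0xbe, 0xef } };
	/* Hash the fixed part first, then chain in the variable-size
	 * buffer, mirroring the two chained checksum calls above. */
	uint64_t key64 = toy_hash(&r.ft_type, sizeof(r.ft_type), 0);

	key64 = toy_hash(r.buf, r.size, key64);
	printf("key64=0x%016" PRIx64 "\n", key64);
	return 0;
}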
.error = error,
.data = resource,
};
+ uint64_t key64;
resource->flags = dev_flow->dv.group ? 0 :
MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
return rte_flow_error_set(error, EOVERFLOW,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many modify header items");
- resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
- entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
+ key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
+ entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
tbl_data->idx = idx;
tbl_data->tunnel = tt_prm->tunnel;
tbl_data->group_id = tt_prm->group_id;
- tbl_data->external = tt_prm->external;
+ tbl_data->external = !!tt_prm->external;
tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
tbl_data->is_egress = !!key.direction;
+ tbl_data->is_transfer = !!key.domain;
+ tbl_data->dummy = !!key.dummy;
+ tbl_data->table_id = key.table_id;
tbl = &tbl_data->tbl;
if (key.dummy)
return &tbl_data->entry;
return &tbl_data->entry;
}
+int
+flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry, uint64_t key64,
+ void *cb_ctx __rte_unused)
+{
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+ union mlx5_flow_tbl_key key = { .v64 = key64 };
+
+ return tbl_data->table_id != key.table_id ||
+ tbl_data->dummy != key.dummy ||
+ tbl_data->is_transfer != key.domain ||
+ tbl_data->is_egress != key.direction;
+}
+
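Because the entry no longer carries the raw 64-bit key, the create callback caches the decoded key fields (table_id, is_transfer, and so on) in the entry so the match callback can compare them later. A hypothetical sketch of that create/match pairing, modeled loosely on union mlx5_flow_tbl_key:

#include <stdint.h>
#include <stdlib.h>

union toy_flow_key {
	uint64_t v64;
	struct {
		uint32_t table_id;
		uint32_t dummy:1;
		uint32_t domain:1;
		uint32_t direction:1;
		uint32_t reserved:29;
	};
};

struct toy_tbl_entry {
	/* Decoded copies of the key, filled at create time. */
	uint32_t table_id;
	uint32_t dummy:1;
	uint32_t is_transfer:1;
	uint32_t is_egress:1;
};

/* Create: decode the key once and cache the fields. */
static struct toy_tbl_entry *
toy_tbl_create(uint64_t key64)
{
	union toy_flow_key key = { .v64 = key64 };
	struct toy_tbl_entry *e = calloc(1, sizeof(*e));

	if (e == NULL)
		return NULL;
	e->table_id = key.table_id;
	e->dummy = key.dummy;
	e->is_transfer = key.domain;
	e->is_egress = key.direction;
	return e;
}

/* Match: compare the cached fields; 0 means "matches". */
static int
toy_tbl_match(const struct toy_tbl_entry *e, uint64_t key64)
{
	union toy_flow_key key = { .v64 = key64 };

	return e->table_id != key.table_id ||
	       e->dummy != key.dummy ||
	       e->is_transfer != key.domain ||
	       e->is_egress != key.direction;
}

int
main(void)
{
	union toy_flow_key key = { .table_id = 4, .direction = 1 };
	struct toy_tbl_entry *e = toy_tbl_create(key.v64);
	int ok = e != NULL && !toy_tbl_match(e, key.v64);

	free(e);
	return ok ? 0 : 1;
}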
/**
* Get a flow table.
*
tbl_data->tunnel->tunnel_id : 0,
.group = tbl_data->group_id
};
- union mlx5_flow_tbl_key table_key = {
- .v64 = entry->key
- };
- uint32_t table_id = table_key.table_id;
+ uint32_t table_id = tbl_data->table_id;
tunnel_grp_hash = tbl_data->tunnel ?
tbl_data->tunnel->groups :
return NULL;
}
entry->idx = idx;
+ entry->tag_id = key;
ret = mlx5_flow_os_create_flow_action_tag(key,
&entry->action);
if (ret) {
return &entry->entry;
}
+int
+flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx __rte_unused)
+{
+ struct mlx5_flow_dv_tag_resource *tag =
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+
+ return key != tag->tag_id;
+}
+
/**
* Find existing tag resource or create and register a new one.
*
mlx5_free(entry);
}
-static int
-mlx5_hlist_default_match_cb(struct mlx5_hlist *h __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *ctx __rte_unused)
-{
- return entry->key != key;
-}
-
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
uint32_t flags, mlx5_hlist_create_cb cb_create,
uint32_t alloc_size;
uint32_t i;
- if (!size || (!cb_create ^ !cb_remove))
+ if (!size || !cb_match || (!cb_create ^ !cb_remove))
return NULL;
/* Align to the next power of 2, 32bits integer is enough now. */
if (!rte_is_power_of_2(size)) {
h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
- h->cb_match = cb_match ? cb_match : mlx5_hlist_default_match_cb;
+ h->cb_match = cb_match;
h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
for (i = 0; i < act_size; i++)
rte_rwlock_init(&h->buckets[i].lock);
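With the default key-based match callback removed, the constructor now rejects a NULL cb_match up front. A self-contained sketch of the tightened validation (toy_hlist_check_cbs and the typedefs are hypothetical), including the existing XOR idiom that requires create and remove callbacks to be provided as a pair:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int (*toy_match_cb)(void *entry, uint64_t key);
typedef void *(*toy_create_cb)(uint64_t key);
typedef void (*toy_remove_cb)(void *entry);

static int
toy_hlist_check_cbs(toy_create_cb create, toy_match_cb match,
		    toy_remove_cb rm)
{
	if (match == NULL)
		return -1; /* No default match callback any more. */
	if (!create ^ !rm)
		return -1; /* Create and remove must come as a pair. */
	return 0;
}

static int
dummy_match(void *entry, uint64_t key)
{
	(void)entry;
	(void)key;
	return 0;
}

int
main(void)
{
	printf("match only: %d\n",
	       toy_hlist_check_cbs(NULL, dummy_match, NULL)); /* ok: 0 */
	printf("no match:   %d\n",
	       toy_hlist_check_cbs(NULL, NULL, NULL)); /* fails: -1 */
	return 0;
}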
DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name);
goto done;
}
- entry->key = key;
+ entry->idx = idx;
entry->ref_cnt = 1;
LIST_INSERT_HEAD(first, entry, next);
__atomic_add_fetch(&b->gen_cnt, 1, __ATOMIC_ACQ_REL);
int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
- uint32_t idx;
-
- if (h->direct_key)
- idx = (uint32_t)(entry->key & h->mask);
- else
- idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
+ uint32_t idx = entry->idx;
rte_rwlock_write_lock(&h->buckets[idx].lock);
MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
*/
struct mlx5_hlist_entry {
LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. */
- uint64_t key; /* user defined 'key', could be the hash signature. */
+ uint32_t idx; /* Bucket index the entry belongs to. */
uint32_t ref_cnt; /* Reference count. */
};
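The struct change above has two effects: unregister can lock the right bucket from the stored index without rehashing, and each entry drops the 8-byte key for a 4-byte index (on a typical LP64 build the entry shrinks from 24 to 16 bytes with padding). A minimal sketch illustrating the layout difference; old_entry and new_entry are hypothetical stand-ins, with a plain pointer in place of LIST_ENTRY:

#include <stdint.h>
#include <stdio.h>

/* Before: every entry carried the full 64-bit key. */
struct old_entry {
	void *next;
	uint64_t key;
	uint32_t ref_cnt;
};

/* After: only the 32-bit bucket index is kept. */
struct new_entry {
	void *next;
	uint32_t idx;
	uint32_t ref_cnt;
};

int
main(void)
{
	printf("old %zu bytes, new %zu bytes\n",
	       sizeof(struct old_entry), sizeof(struct new_entry));
	return 0;
}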