}
}
+/**
+ * Return the shared hash list *phl, creating it on first use.
+ *
+ * Lock-free lazy initialization: if *phl is still NULL, a new hash list
+ * named "<ibdev_name>_<name>" is created and published with an atomic
+ * compare-and-swap; if another thread won the creation race, the local
+ * list is destroyed and the winner's list is returned instead.
+ *
+ * @param sh
+ *   Pointer to the shared device context (supplies ibdev_name).
+ * @param phl
+ *   Address of the hash list pointer to initialize or read.
+ * @param name
+ *   Name suffix for the new hash list.
+ * @param size
+ *   Hash table size, forwarded to mlx5_hlist_create().
+ * @param direct_key
+ *   Forwarded to mlx5_hlist_create().
+ * @param lcores_share
+ *   Forwarded to mlx5_hlist_create().
+ * @param ctx
+ *   Context forwarded to the hash list callbacks.
+ * @param cb_create, cb_match, cb_remove, cb_clone, cb_clone_free
+ *   Entry management callbacks forwarded to mlx5_hlist_create().
+ *
+ * @return
+ *   Pointer to the hash list on success, NULL on failure with rte_errno
+ *   set to ENOMEM.
+ */
+static inline struct mlx5_hlist *
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+ const char *name, uint32_t size, bool direct_key,
+ bool lcores_share, void *ctx,
+ mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove,
+ mlx5_list_clone_cb cb_clone,
+ mlx5_list_clone_free_cb cb_clone_free)
+{
+ struct mlx5_hlist *hl;
+ struct mlx5_hlist *expected = NULL;
+ char s[MLX5_NAME_SIZE];
+
+ /* Fast path: list already created and published. */
+ hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ if (likely(hl))
+ return hl;
+ snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
+ hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
+ ctx, cb_create, cb_match, cb_remove, cb_clone,
+ cb_clone_free);
+ if (!hl) {
+ DRV_LOG(ERR, "%s hash creation failed", name);
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ /* Publish the new list; on a lost race keep the winner's list. */
+ if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
+ __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST)) {
+ mlx5_hlist_destroy(hl);
+ hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ }
+ return hl;
+}
+
/* Update VLAN's VID/PCP based on input rte_flow_action.
*
* @param[in] action
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
- uint64_t item_flags,
- uint64_t last_item,
- uint16_t ether_type,
- struct rte_flow_error *error)
+flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags, uint64_t last_item,
+ uint16_t ether_type, struct rte_flow_error *error)
{
int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *last = item->last;
const struct rte_flow_item_ipv4 *mask = item->mask;
rte_be16_t fragment_offset_spec = 0;
rte_be16_t fragment_offset_last = 0;
- const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+ struct rte_flow_item_ipv4 nic_ipv4_mask = {
.hdr = {
.src_addr = RTE_BE32(0xffffffff),
.dst_addr = RTE_BE32(0xffffffff),
},
};
+ if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
+ priv->config.hca_attr.inner_ipv4_ihl;
+ if (!ihl_cap)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "IPV4 ihl offload not supported");
+ nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
+ }
ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
ether_type, &nic_ipv4_mask,
MLX5_ITEM_RANGE_ACCEPTED, error);
return 0;
}
-/**
- * Match encap_decap resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- * @param key
- * Key of the new entry.
- * @param ctx_cb
- * Pointer to new encap_decap resource.
- *
- * @return
- * 0 on matching, none-zero otherwise.
- */
int
-flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key __rte_unused, void *cb_ctx)
+flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
return -1;
}
-/**
- * Allocate encap_decap resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- * @param ctx_cb
- * Pointer to new encap_decap resource.
- *
- * @return
- * 0 on matching, none-zero otherwise.
- */
-struct mlx5_hlist_entry *
-flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
- uint64_t key __rte_unused,
- void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5dv_dr_domain *domain;
struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
return &resource->entry;
}
+/**
+ * Clone an encap/decap resource for another lcore (hash list clone
+ * callback). Allocates a new element from the DECAP_ENCAP ipool and
+ * copies the source entry into it.
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param oentry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx, used for error reporting.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ uint32_t idx;
+
+ cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &idx);
+ if (!cache_resource) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ memcpy(cache_resource, oentry, sizeof(*cache_resource));
+ /* Keep the copy's own ipool index, not the source entry's. */
+ cache_resource->idx = idx;
+ return &cache_resource->entry;
+}
+
+/**
+ * Free a cloned encap/decap resource back to the DECAP_ENCAP ipool
+ * (hash list clone-free callback).
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param entry
+ *   Cloned entry to release.
+ */
+void
+flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_dv_encap_decap_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
+}
+
/**
* Find existing encap/decap resource or create and register a new one.
*
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
union {
struct {
uint32_t ft_type:8;
.error = error,
.data = resource,
};
+ struct mlx5_hlist *encaps_decaps;
uint64_t key64;
+ encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
+ "encaps_decaps",
+ MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
+ true, true, sh,
+ flow_dv_encap_decap_create_cb,
+ flow_dv_encap_decap_match_cb,
+ flow_dv_encap_decap_remove_cb,
+ flow_dv_encap_decap_clone_cb,
+ flow_dv_encap_decap_clone_free_cb);
+ if (unlikely(!encaps_decaps))
+ return -rte_errno;
resource->flags = dev_flow->dv.group ? 0 : 1;
key64 = __rte_raw_cksum(&encap_decap_key.v32,
sizeof(encap_decap_key.v32), 0);
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
resource->size)
key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
- entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
+ entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
}
int
-flow_dv_port_id_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
struct mlx5_flow_dv_port_id_action_resource *res =
- container_of(entry, typeof(*res), entry);
+ container_of(entry, typeof(*res), entry);
return ref->port_id != res->port_id;
}
struct mlx5_list_entry *
-flow_dv_port_id_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
struct mlx5_flow_dv_port_id_action_resource *resource;
return &resource->entry;
}
+/**
+ * Clone a port_id action resource for another lcore (list clone
+ * callback). Allocates a new element from the PORT_ID ipool and copies
+ * the source entry into it.
+ *
+ * NOTE(review): "entry" is tagged __rte_unused but is read by the
+ * memcpy() below — the attribute looks stale; confirm and drop it.
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param entry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx, used for error reporting.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_port_id_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_port_id_action_resource *resource;
+ uint32_t idx;
+
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
+ if (!resource) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate port_id action memory");
+ return NULL;
+ }
+ memcpy(resource, entry, sizeof(*resource));
+ /* Keep the copy's own ipool index, not the source entry's. */
+ resource->idx = idx;
+ return &resource->entry;
+}
+
+/**
+ * Free a cloned port_id action resource back to the PORT_ID ipool
+ * (list clone-free callback).
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param entry
+ *   Cloned entry to release.
+ */
+void
+flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_dv_port_id_action_resource *resource =
+ container_of(entry, typeof(*resource), entry);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
+}
+
/**
* Find existing table port ID resource or create and register a new one.
*
.data = ref,
};
- entry = mlx5_list_register(&priv->sh->port_id_action_list, &ctx);
+ entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
}
int
-flow_dv_push_vlan_match_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry, void *cb_ctx)
+flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
struct mlx5_flow_dv_push_vlan_action_resource *res =
- container_of(entry, typeof(*res), entry);
+ container_of(entry, typeof(*res), entry);
return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
}
struct mlx5_list_entry *
-flow_dv_push_vlan_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
struct mlx5_flow_dv_push_vlan_action_resource *resource;
return &resource->entry;
}
+/**
+ * Clone a push_vlan action resource for another lcore (list clone
+ * callback). Allocates a new element from the PUSH_VLAN ipool and
+ * copies the source entry into it.
+ *
+ * NOTE(review): "entry" is tagged __rte_unused but is read by the
+ * memcpy() below — the attribute looks stale; confirm and drop it.
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param entry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx, used for error reporting.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_push_vlan_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_push_vlan_action_resource *resource;
+ uint32_t idx;
+
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
+ if (!resource) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate push_vlan action memory");
+ return NULL;
+ }
+ memcpy(resource, entry, sizeof(*resource));
+ /* Keep the copy's own ipool index, not the source entry's. */
+ resource->idx = idx;
+ return &resource->entry;
+}
+
+/**
+ * Free a cloned push_vlan action resource back to the PUSH_VLAN ipool
+ * (list clone-free callback).
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param entry
+ *   Cloned entry to release.
+ */
+void
+flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_dv_push_vlan_action_resource *resource =
+ container_of(entry, typeof(*resource), entry);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
+}
+
/**
* Find existing push vlan resource or create and register a new one.
*
.data = ref,
};
- entry = mlx5_list_register(&priv->sh->push_vlan_action_list, &ctx);
+ entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
return ret;
}
-/**
- * Match modify-header resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- * @param key
- * Key of the new entry.
- * @param ctx
- * Pointer to new modify-header resource.
- *
- * @return
- * 0 on matching, non-zero otherwise.
- */
int
-flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key __rte_unused, void *cb_ctx)
+flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
struct mlx5_flow_dv_modify_hdr_resource *resource =
- container_of(entry, typeof(*resource), entry);
+ container_of(entry, typeof(*resource), entry);
uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
key_len += ref->actions_num * sizeof(ref->actions[0]);
memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
-struct mlx5_hlist_entry *
-flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
- void *cb_ctx)
+/**
+ * Get, lazily creating on first use, the indexed pool for modify-header
+ * resources carrying (index + 1) modification commands.
+ *
+ * Double-checked pattern: the pool is published in sh->mdh_ipools[index]
+ * with an atomic compare-and-swap; the loser of a creation race destroys
+ * its pool and adopts the published one.
+ *
+ * @param sh
+ *   Pointer to the shared device context owning mdh_ipools[].
+ * @param index
+ *   Pool index — number of modification commands minus one (see the
+ *   caller, which passes ref->actions_num - 1).
+ *
+ * @return
+ *   Pointer to the indexed pool, NULL on creation failure.
+ */
+static struct mlx5_indexed_pool *
+flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
+{
+ struct mlx5_indexed_pool *ipool = __atomic_load_n
+ (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+
+ if (!ipool) {
+ struct mlx5_indexed_pool *expected = NULL;
+ struct mlx5_indexed_pool_config cfg =
+ (struct mlx5_indexed_pool_config) {
+ .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ (index + 1) *
+ sizeof(struct mlx5_modification_cmd),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 1,
+ .release_mem_en = !!sh->reclaim_mode,
+ /* Per-core cache is disabled in reclaim mode. */
+ .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .type = "mlx5_modify_action_resource",
+ };
+
+ cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
+ ipool = mlx5_ipool_create(&cfg);
+ if (!ipool)
+ return NULL;
+ /* Publish the pool; on a lost race keep the winner's pool. */
+ if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
+ &expected, ipool, false,
+ __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST)) {
+ mlx5_ipool_destroy(ipool);
+ ipool = __atomic_load_n(&sh->mdh_ipools[index],
+ __ATOMIC_SEQ_CST);
+ }
+ }
+ return ipool;
+}
+
+struct mlx5_list_entry *
+flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5dv_dr_domain *ns;
struct mlx5_flow_dv_modify_hdr_resource *entry;
struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+ struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
+ ref->actions_num - 1);
int ret;
uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
+ uint32_t idx;
- entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
- SOCKET_ID_ANY);
+ if (unlikely(!ipool)) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot allocate modify ipool");
+ return NULL;
+ }
+ entry = mlx5_ipool_zmalloc(ipool, &idx);
if (!entry) {
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
(sh->ctx, ns, entry,
data_len, &entry->action);
if (ret) {
- mlx5_free(entry);
+ mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create modification action");
return NULL;
}
+ entry->idx = idx;
+ return &entry->entry;
+}
+
+/**
+ * Clone a modify-header resource for another lcore (hash list clone
+ * callback). Allocates a new element from the ipool matching the
+ * reference's action count and copies the whole resource including the
+ * trailing actions array (data_len bytes).
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param oentry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx; ctx->data is the reference
+ *   resource supplying actions_num.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_modify_hdr_resource *entry;
+ struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+ uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
+ uint32_t idx;
+
+ entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
+ &idx);
+ if (!entry) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ memcpy(entry, oentry, sizeof(*entry) + data_len);
+ /* Keep the copy's own ipool index, not the source entry's. */
+ entry->idx = idx;
 return &entry->entry;
}
+/**
+ * Free a cloned modify-header resource back to the ipool selected by
+ * its action count (hash list clone-free callback).
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param entry
+ *   Cloned entry to release.
+ */
+void
+flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_dv_modify_hdr_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
+}
+
/**
* Validate the sample action.
*
uint32_t key_len = sizeof(*resource) -
offsetof(typeof(*resource), ft_type) +
resource->actions_num * sizeof(resource->actions[0]);
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.error = error,
.data = resource,
};
+ struct mlx5_hlist *modify_cmds;
uint64_t key64;
+ modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
+ "hdr_modify",
+ MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
+ true, false, sh,
+ flow_dv_modify_create_cb,
+ flow_dv_modify_match_cb,
+ flow_dv_modify_remove_cb,
+ flow_dv_modify_clone_cb,
+ flow_dv_modify_clone_free_cb);
+ if (unlikely(!modify_cmds))
+ return -rte_errno;
resource->root = !dev_flow->dv.group;
if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
resource->root))
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many modify header items");
key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
- entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
+ entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
case RTE_FLOW_ITEM_TYPE_IPV4:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
- ret = flow_dv_validate_item_ipv4(items, item_flags,
+ ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
last_item, ether_type,
error);
if (ret < 0)
void *headers_v;
char *l24_m;
char *l24_v;
- uint8_t tos;
+ uint8_t tos, ihl_m, ihl_v;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
+ ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
+ ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
ipv4_m->hdr.type_of_service);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
}
}
-struct mlx5_hlist_entry *
-flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
+/**
+ * Clone a flow matcher entry for another lcore (list clone callback).
+ *
+ * Allocates a plain mlx5_malloc() copy of the matcher and points its
+ * tbl field at the table embedded in the reference matcher's owning
+ * table data entry.
+ *
+ * @param tool_ctx
+ *   Unused.
+ * @param entry
+ *   Source matcher entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx; ctx->data is the reference
+ *   matcher supplying the table pointer.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+static struct mlx5_list_entry *
+flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_matcher *ref = ctx->data;
+ struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
+ typeof(*tbl), tbl);
+ struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
+ sizeof(*resource),
+ 0, SOCKET_ID_ANY);
+
+ if (!resource) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create matcher");
+ return NULL;
+ }
+ memcpy(resource, entry, sizeof(*resource));
+ resource->tbl = &tbl->tbl;
+ return &resource->entry;
+}
+
+/**
+ * Free a cloned matcher entry (list clone-free callback). The clone was
+ * allocated with mlx5_malloc(), so a plain mlx5_free() suffices.
+ *
+ * @param tool_ctx
+ *   Unused.
+ * @param entry
+ *   Cloned entry to release.
+ */
+static void
+flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ mlx5_free(entry);
+}
+
+struct mlx5_list_entry *
+flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
struct mlx5_flow_tbl_data_entry *tbl_data;
- struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
+ struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
struct rte_flow_error *error = ctx->error;
- union mlx5_flow_tbl_key key = { .v64 = key64 };
+ union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
struct mlx5_flow_tbl_resource *tbl;
void *domain;
uint32_t idx = 0;
MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
key.level, key.id);
- mlx5_list_create(&tbl_data->matchers, matcher_name, 0, sh,
- flow_dv_matcher_create_cb,
- flow_dv_matcher_match_cb,
- flow_dv_matcher_remove_cb);
+ tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
+ flow_dv_matcher_create_cb,
+ flow_dv_matcher_match_cb,
+ flow_dv_matcher_remove_cb,
+ flow_dv_matcher_clone_cb,
+ flow_dv_matcher_clone_free_cb);
+ if (!tbl_data->matchers) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create tbl matcher list");
+ mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ return NULL;
+ }
return &tbl_data->entry;
}
int
-flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry, uint64_t key64,
- void *cb_ctx __rte_unused)
+flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
+ void *cb_ctx)
{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
- union mlx5_flow_tbl_key key = { .v64 = key64 };
+ union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
return tbl_data->level != key.level ||
tbl_data->id != key.id ||
tbl_data->is_egress != !!key.is_egress;
}
+/**
+ * Clone a flow table data entry for another lcore (hash list clone
+ * callback). Allocates a new element from the JUMP ipool and copies the
+ * source entry into it.
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param oentry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx, used for error reporting.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct rte_flow_error *error = ctx->error;
+ uint32_t idx = 0;
+
+ tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
+ if (!tbl_data) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate flow table data entry");
+ return NULL;
+ }
+ memcpy(tbl_data, oentry, sizeof(*tbl_data));
+ /* Keep the copy's own ipool index, not the source entry's. */
+ tbl_data->idx = idx;
+ return &tbl_data->entry;
+}
+
+/**
+ * Free a cloned flow table data entry back to the JUMP ipool (hash list
+ * clone-free callback).
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param entry
+ *   Cloned entry to release.
+ */
+void
+flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
+}
+
/**
* Get a flow table.
*
struct mlx5_flow_cb_ctx ctx = {
.dev = dev,
.error = error,
- .data = &tt_prm,
+ .data = &table_key.v64,
+ .data2 = &tt_prm,
};
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_tbl_data_entry *tbl_data;
entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
}
void
-flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_tbl_data_entry *tbl_data =
- container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
MLX5_ASSERT(entry && sh);
if (tbl_data->jump.action)
if (tbl_data->tbl.obj)
mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
if (tbl_data->tunnel_offload && tbl_data->external) {
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
struct mlx5_hlist *tunnel_grp_hash;
struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
union tunnel_tbl_key tunnel_key = {
.group = tbl_data->group_id
};
uint32_t table_level = tbl_data->level;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = (void *)&tunnel_key.val,
+ };
tunnel_grp_hash = tbl_data->tunnel ?
tbl_data->tunnel->groups :
thub->groups;
- he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
+ he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
if (he)
mlx5_hlist_unregister(tunnel_grp_hash, he);
DRV_LOG(DEBUG,
tbl_data->tunnel->tunnel_id : 0,
tbl_data->group_id);
}
- mlx5_list_destroy(&tbl_data->matchers);
+ mlx5_list_destroy(tbl_data->matchers);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
}
int
-flow_dv_matcher_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
}
struct mlx5_list_entry *
-flow_dv_matcher_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_matcher *ref = ctx->data;
struct mlx5_flow_dv_matcher *resource;
return -rte_errno; /* No need to refill the error info */
tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
ref->tbl = tbl;
- entry = mlx5_list_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(tbl_data->matchers, &ctx);
if (!entry) {
flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
return rte_flow_error_set(error, ENOMEM,
return 0;
}
-struct mlx5_hlist_entry *
-flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
+struct mlx5_list_entry *
+flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
- struct rte_flow_error *error = ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_tag_resource *entry;
uint32_t idx = 0;
int ret;
entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
if (!entry) {
- rte_flow_error_set(error, ENOMEM,
+ rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
return NULL;
}
entry->idx = idx;
- entry->tag_id = key;
- ret = mlx5_flow_os_create_flow_action_tag(key,
+ entry->tag_id = *(uint32_t *)(ctx->data);
+ ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
&entry->action);
if (ret) {
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
- rte_flow_error_set(error, ENOMEM,
+ rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
return NULL;
}
int
-flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry, uint64_t key,
- void *cb_ctx __rte_unused)
+flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_tag_resource *tag =
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+
+ return *(uint32_t *)(ctx->data) != tag->tag_id;
+}
+
+/**
+ * Clone a tag resource for another lcore (hash list clone callback).
+ * Allocates a new element from the TAG ipool and copies the source
+ * entry into it.
+ *
+ * @param tool_ctx
+ *   Pointer to the shared device context.
+ * @param oentry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx, used for error reporting.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_tag_resource *entry;
+ uint32_t idx = 0;
+
+ entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
+ if (!entry) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate tag resource memory");
+ return NULL;
+ }
+ memcpy(entry, oentry, sizeof(*entry));
+ /* Keep the copy's own ipool index, not the source entry's. */
+ entry->idx = idx;
+ return &entry->entry;
+}
+
+void
+flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_tag_resource *tag =
- container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
- return key != tag->tag_id;
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}
/**
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_tag_resource *resource;
- struct mlx5_hlist_entry *entry;
-
- entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
+ struct mlx5_list_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = &tag_be24,
+ };
+ struct mlx5_hlist *tag_table;
+
+ tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
+ "tags",
+ MLX5_TAGS_HLIST_ARRAY_SIZE,
+ false, false, priv->sh,
+ flow_dv_tag_create_cb,
+ flow_dv_tag_match_cb,
+ flow_dv_tag_remove_cb,
+ flow_dv_tag_clone_cb,
+ flow_dv_tag_clone_free_cb);
+ if (unlikely(!tag_table))
+ return -rte_errno;
+ entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
if (entry) {
resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
entry);
}
void
-flow_dv_tag_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_tag_resource *tag =
- container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
MLX5_ASSERT(tag && sh && tag->action);
claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
}
int
-flow_dv_sample_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
}
struct mlx5_list_entry *
-flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
}
+/**
+ * Clone a sample resource for another lcore (list clone callback).
+ * Allocates a new element from the SAMPLE ipool, copies the source
+ * entry, and stores the owning device so the clone-free callback can
+ * locate the ipool.
+ *
+ * NOTE(review): "entry" is tagged __rte_unused but is read by the
+ * memcpy() below — the attribute looks stale; confirm and drop it.
+ *
+ * @param tool_ctx
+ *   Unused; the pool is reached through ctx->dev instead.
+ * @param entry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx supplying dev and error.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_sample_resource *resource;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ uint32_t idx = 0;
+
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
+ if (!resource) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ memcpy(resource, entry, sizeof(*resource));
+ /* Keep the copy's own index/device, not the source entry's. */
+ resource->idx = idx;
+ resource->dev = dev;
+ return &resource->entry;
+}
+
+/**
+ * Free a cloned sample resource back to the SAMPLE ipool (list
+ * clone-free callback). The pool is found through the device pointer
+ * stored in the clone.
+ *
+ * @param tool_ctx
+ *   Unused.
+ * @param entry
+ *   Cloned entry to release.
+ */
+void
+flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ struct mlx5_flow_dv_sample_resource *resource =
+ container_of(entry, typeof(*resource), entry);
+ struct rte_eth_dev *dev = resource->dev;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
+}
+
/**
* Find existing sample resource or create and register a new one.
*
.data = ref,
};
- entry = mlx5_list_register(&priv->sh->sample_action_list, &ctx);
+ entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
}
int
-flow_dv_dest_array_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
struct rte_eth_dev *dev = ctx->dev;
struct mlx5_flow_dv_dest_array_resource *resource =
- container_of(entry, typeof(*resource), entry);
+ container_of(entry, typeof(*resource), entry);
uint32_t idx = 0;
if (ctx_resource->num_of_dest == resource->num_of_dest &&
}
struct mlx5_list_entry *
-flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
return NULL;
}
+/**
+ * Clone a destination array resource for another lcore (list clone
+ * callback). Allocates a new element from the DEST_ARRAY ipool, copies
+ * the source entry, and stores the owning device so the clone-free
+ * callback can locate the ipool.
+ *
+ * NOTE(review): "entry" is tagged __rte_unused but is read by the
+ * memcpy() below — the attribute looks stale; confirm and drop it.
+ *
+ * @param tool_ctx
+ *   Unused; the pool is reached through ctx->dev instead.
+ * @param entry
+ *   Source entry to clone.
+ * @param cb_ctx
+ *   Pointer to struct mlx5_flow_cb_ctx supplying dev and error.
+ *
+ * @return
+ *   Cloned entry on success, NULL otherwise and error is set.
+ */
+struct mlx5_list_entry *
+flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_dest_array_resource *resource;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ uint32_t res_idx = 0;
+ struct rte_flow_error *error = ctx->error;
+
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &res_idx);
+ if (!resource) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate dest-array memory");
+ return NULL;
+ }
+ memcpy(resource, entry, sizeof(*resource));
+ /* Keep the copy's own index/device, not the source entry's. */
+ resource->idx = res_idx;
+ resource->dev = dev;
+ return &resource->entry;
+}
+
+/**
+ * Free a cloned destination array resource back to the DEST_ARRAY ipool
+ * (list clone-free callback). The pool is found through the device
+ * pointer stored in the clone.
+ *
+ * @param tool_ctx
+ *   Unused.
+ * @param entry
+ *   Cloned entry to release.
+ */
+void
+flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ struct mlx5_flow_dv_dest_array_resource *resource =
+ container_of(entry, typeof(*resource), entry);
+ struct rte_eth_dev *dev = resource->dev;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
+}
+
/**
* Find existing destination array resource or create and register a new one.
*
.data = ref,
};
- entry = mlx5_list_register(&priv->sh->dest_array_list, &ctx);
+ entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
}
void
-flow_dv_matcher_remove_cb(struct mlx5_list *list __rte_unused,
+flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_matcher *resource = container_of(entry,
int ret;
MLX5_ASSERT(matcher->matcher_object);
- ret = mlx5_list_unregister(&tbl->matchers, &matcher->entry);
+ ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
return ret;
}
-/**
- * Release encap_decap resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- */
void
-flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_encap_decap_resource *res =
container_of(entry, typeof(*res), entry);
}
void
-flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry)
+flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_modify_hdr_resource *res =
container_of(entry, typeof(*res), entry);
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
- mlx5_free(entry);
+ mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
}
/**
}
void
-flow_dv_port_id_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_port_id_action_resource *resource =
container_of(entry, typeof(*resource), entry);
if (!resource)
return 0;
MLX5_ASSERT(resource->action);
- return mlx5_list_unregister(&priv->sh->port_id_action_list,
+ return mlx5_list_unregister(priv->sh->port_id_action_list,
&resource->entry);
}
}
void
-flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *resource =
container_of(entry, typeof(*resource), entry);
if (!resource)
return 0;
MLX5_ASSERT(resource->action);
- return mlx5_list_unregister(&priv->sh->push_vlan_action_list,
+ return mlx5_list_unregister(priv->sh->push_vlan_action_list,
&resource->entry);
}
}
void
-flow_dv_sample_remove_cb(struct mlx5_list *list __rte_unused,
+flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
if (!resource)
return 0;
MLX5_ASSERT(resource->verbs_action);
- return mlx5_list_unregister(&priv->sh->sample_action_list,
+ return mlx5_list_unregister(priv->sh->sample_action_list,
&resource->entry);
}
void
-flow_dv_dest_array_remove_cb(struct mlx5_list *list __rte_unused,
+flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_dest_array_resource *resource =
if (!resource)
return 0;
MLX5_ASSERT(resource->action);
- return mlx5_list_unregister(&priv->sh->dest_array_list,
+ return mlx5_list_unregister(priv->sh->dest_array_list,
&resource->entry);
}
claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
tbl = container_of(color_rule->matcher->tbl,
typeof(*tbl), tbl);
- mlx5_list_unregister(&tbl->matchers,
+ mlx5_list_unregister(tbl->matchers,
&color_rule->matcher->entry);
TAILQ_REMOVE(&sub_policy->color_rules[i],
color_rule, next_port);
if (mtrmng->def_matcher[i]) {
tbl = container_of(mtrmng->def_matcher[i]->tbl,
struct mlx5_flow_tbl_data_entry, tbl);
- mlx5_list_unregister(&tbl->matchers,
+ mlx5_list_unregister(tbl->matchers,
&mtrmng->def_matcher[i]->entry);
mtrmng->def_matcher[i] = NULL;
}
container_of(mtrmng->drop_matcher[i][j]->tbl,
struct mlx5_flow_tbl_data_entry,
tbl);
- mlx5_list_unregister(&tbl->matchers,
+ mlx5_list_unregister(tbl->matchers,
&mtrmng->drop_matcher[i][j]->entry);
mtrmng->drop_matcher[i][j] = NULL;
}
matcher.priority = priority;
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
- entry = mlx5_list_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(tbl_data->matchers, &ctx);
if (!entry) {
DRV_LOG(ERR, "Failed to register meter drop matcher.");
return -1;
struct mlx5_flow_tbl_data_entry *tbl =
container_of(color_rule->matcher->tbl,
typeof(*tbl), tbl);
- mlx5_list_unregister(&tbl->matchers,
+ mlx5_list_unregister(tbl->matchers,
&color_rule->matcher->entry);
}
mlx5_free(color_rule);
matcher.crc = rte_raw_cksum
((const void *)matcher.mask.buf,
matcher.mask.size);
- entry = mlx5_list_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(tbl_data->matchers, &ctx);
if (!entry) {
DRV_LOG(ERR, "Failed to register meter "
"drop default matcher.");
matcher.crc = rte_raw_cksum
((const void *)matcher.mask.buf,
matcher.mask.size);
- entry = mlx5_list_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(tbl_data->matchers, &ctx);
if (!entry) {
DRV_LOG(ERR,
"Failed to register meter drop matcher.");
struct mlx5_flow_tbl_data_entry *tbl =
container_of(color_rule->matcher->tbl,
typeof(*tbl), tbl);
- mlx5_list_unregister(&tbl->matchers,
+ mlx5_list_unregister(tbl->matchers,
&color_rule->matcher->entry);
}
mlx5_free(color_rule);