}
}
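+/**
+ * Prepare the hash list pointed to by @p phl, creating it on first use.
+ *
+ * The first caller allocates the hash list and publishes it with an atomic
+ * compare-and-swap; a caller that loses the race destroys its own instance
+ * and returns the hash list that was installed first, so the helper is safe
+ * to call concurrently.
+ *
+ * @return
+ *   Pointer to the hash list on success, NULL otherwise and rte_errno is set.
+ */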
+static inline struct mlx5_hlist *
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+ const char *name, uint32_t size, bool direct_key,
+ bool lcores_share, void *ctx,
+ mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove,
+ mlx5_list_clone_cb cb_clone,
+ mlx5_list_clone_free_cb cb_clone_free)
+{
+ struct mlx5_hlist *hl;
+ struct mlx5_hlist *expected = NULL;
+ char s[MLX5_NAME_SIZE];
+
+ hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ if (likely(hl))
+ return hl;
+ snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
+ hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
+ ctx, cb_create, cb_match, cb_remove, cb_clone,
+ cb_clone_free);
+ if (!hl) {
+ DRV_LOG(ERR, "%s hash creation failed", name);
+ rte_errno = ENOMEM;
+ return NULL;
+ }
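+ /* Use the hash list published first if another thread won the race. */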
+ if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
+ __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST)) {
+ mlx5_hlist_destroy(hl);
+ hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ }
+ return hl;
+}
+
/**
 * Internal validation function. For validating both actions and items.
 *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
-flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
- uint64_t item_flags,
- uint64_t last_item,
- uint16_t ether_type,
- struct rte_flow_error *error)
+flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags, uint64_t last_item,
+ uint16_t ether_type, struct rte_flow_error *error)
{
int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *last = item->last;
const struct rte_flow_item_ipv4 *mask = item->mask;
rte_be16_t fragment_offset_spec = 0;
rte_be16_t fragment_offset_last = 0;
- const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+ struct rte_flow_item_ipv4 nic_ipv4_mask = {
.hdr = {
.src_addr = RTE_BE32(0xffffffff),
.dst_addr = RTE_BE32(0xffffffff),
},
};
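+ /* IHL matching needs the per-layer (outer/inner) device capability. */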
+ if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
+ priv->config.hca_attr.inner_ipv4_ihl;
+ if (!ihl_cap)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "IPV4 ihl offload not supported");
+ nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
+ }
ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
ether_type, &nic_ipv4_mask,
MLX5_ITEM_RANGE_ACCEPTED, error);
.error = error,
.data = resource,
};
+ struct mlx5_hlist *encaps_decaps;
uint64_t key64;
+ encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
+ "encaps_decaps",
+ MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
+ true, true, sh,
+ flow_dv_encap_decap_create_cb,
+ flow_dv_encap_decap_match_cb,
+ flow_dv_encap_decap_remove_cb,
+ flow_dv_encap_decap_clone_cb,
+ flow_dv_encap_decap_clone_free_cb);
+ if (unlikely(!encaps_decaps))
+ return -rte_errno;
resource->flags = dev_flow->dv.group ? 0 : 1;
key64 = __rte_raw_cksum(&encap_decap_key.v32,
sizeof(encap_decap_key.v32), 0);
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
resource->size)
key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
- entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
+ entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
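+/**
+ * Get the indexed pool for modify-header resources carrying @p index + 1
+ * modification commands, creating the pool on first use. Like the hash
+ * list helper above, the new pool is published with an atomic
+ * compare-and-swap so racing threads converge on a single instance.
+ *
+ * @return
+ *   Pointer to the indexed pool on success, NULL otherwise.
+ */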
+static struct mlx5_indexed_pool *
+flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
+{
+ struct mlx5_indexed_pool *ipool = __atomic_load_n
+ (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+
+ if (!ipool) {
+ struct mlx5_indexed_pool *expected = NULL;
+ struct mlx5_indexed_pool_config cfg =
+ (struct mlx5_indexed_pool_config) {
+ .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ (index + 1) *
+ sizeof(struct mlx5_modification_cmd),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 1,
+ .release_mem_en = !!sh->reclaim_mode,
+ .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .type = "mlx5_modify_action_resource",
+ };
+
+ cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
+ ipool = mlx5_ipool_create(&cfg);
+ if (!ipool)
+ return NULL;
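+ /* Keep the pool installed first if another thread won the race. */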
+ if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
+ &expected, ipool, false,
+ __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST)) {
+ mlx5_ipool_destroy(ipool);
+ ipool = __atomic_load_n(&sh->mdh_ipools[index],
+ __ATOMIC_SEQ_CST);
+ }
+ }
+ return ipool;
+}
+
struct mlx5_list_entry *
flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
{
struct mlx5dv_dr_domain *ns;
struct mlx5_flow_dv_modify_hdr_resource *entry;
struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+ struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
+ ref->actions_num - 1);
int ret;
uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
+ uint32_t idx;
- entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
- SOCKET_ID_ANY);
+ if (unlikely(!ipool)) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot allocate modify ipool");
+ return NULL;
+ }
+ entry = mlx5_ipool_zmalloc(ipool, &idx);
if (!entry) {
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
(sh->ctx, ns, entry,
data_len, &entry->action);
if (ret) {
- mlx5_free(entry);
+ mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create modification action");
return NULL;
}
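+ /* Save the ipool index so the entry can be freed back later. */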
+ entry->idx = idx;
return &entry->entry;
}
struct mlx5_list_entry *
-flow_dv_modify_clone_cb(void *tool_ctx __rte_unused,
- struct mlx5_list_entry *oentry, void *cb_ctx)
+flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_modify_hdr_resource *entry;
struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
+ uint32_t idx;
- entry = mlx5_malloc(0, sizeof(*entry) + data_len, 0, SOCKET_ID_ANY);
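+ /* The ipool for this actions_num was created by the create callback. */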
+ entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
+ &idx);
if (!entry) {
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
return NULL;
}
memcpy(entry, oentry, sizeof(*entry) + data_len);
+ entry->idx = idx;
return &entry->entry;
}
void
-flow_dv_modify_clone_free_cb(void *tool_ctx __rte_unused,
- struct mlx5_list_entry *entry)
+flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- mlx5_free(entry);
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_dv_modify_hdr_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
}
/**
.error = error,
.data = resource,
};
+ struct mlx5_hlist *modify_cmds;
uint64_t key64;
+ modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
+ "hdr_modify",
+ MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
+ true, false, sh,
+ flow_dv_modify_create_cb,
+ flow_dv_modify_match_cb,
+ flow_dv_modify_remove_cb,
+ flow_dv_modify_clone_cb,
+ flow_dv_modify_clone_free_cb);
+ if (unlikely(!modify_cmds))
+ return -rte_errno;
resource->root = !dev_flow->dv.group;
if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
resource->root))
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many modify header items");
key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
- entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
+ entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
if (!entry)
return -rte_errno;
resource = container_of(entry, typeof(*resource), entry);
case RTE_FLOW_ITEM_TYPE_IPV4:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
- ret = flow_dv_validate_item_ipv4(items, item_flags,
+ ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
last_item, ether_type,
error);
if (ret < 0)
void *headers_v;
char *l24_m;
char *l24_v;
- uint8_t tos;
+ uint8_t tos, ihl_m, ihl_v;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
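+ /* Match on IHL, the low nibble of the version_ihl field. */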
+ ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
+ ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
ipv4_m->hdr.type_of_service);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
.error = error,
.data = &tag_be24,
};
-
- entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, &ctx);
+ struct mlx5_hlist *tag_table;
+
+ tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
+ "tags",
+ MLX5_TAGS_HLIST_ARRAY_SIZE,
+ false, false, priv->sh,
+ flow_dv_tag_create_cb,
+ flow_dv_tag_match_cb,
+ flow_dv_tag_remove_cb,
+ flow_dv_tag_clone_cb,
+ flow_dv_tag_clone_free_cb);
+ if (unlikely(!tag_table))
+ return -rte_errno;
+ entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
if (entry) {
resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
entry);
}
void
-flow_dv_modify_remove_cb(void *tool_ctx __rte_unused,
- struct mlx5_list_entry *entry)
+flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_modify_hdr_resource *res =
container_of(entry, typeof(*res), entry);
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
- mlx5_free(entry);
+ mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
}
/**