diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index dafe07f42e..5e230a3c25 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -33,6 +33,7 @@
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
+#include "rte_pmd_mlx5.h"
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 
@@ -70,12 +71,9 @@ union flow_dv_attr {
 };
 
 static int
-flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);
 
-static int
-flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);
-
 static int
 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx);
@@ -83,6 +81,8 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
 static int
 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
 
 /**
  * Initialize flow attributes structure according to flow items' types.
@@ -278,45 +278,6 @@ mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
 	}
 }
 
-/**
- * Acquire the synchronizing object to protect multithreaded access
- * to shared dv context. Lock occurs only if context is actually
- * shared, i.e. we have multiport IB device and representors are
- * created.
- *
- * @param[in] dev
- *   Pointer to the rte_eth_dev structure.
- */
-static void
-flow_dv_shared_lock(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-
-	if (sh->refcnt > 1) {
-		int ret;
-
-		ret = pthread_mutex_lock(&sh->dv_mutex);
-		MLX5_ASSERT(!ret);
-		(void)ret;
-	}
-}
-
-static void
-flow_dv_shared_unlock(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-
-	if (sh->refcnt > 1) {
-		int ret;
-
-		ret = pthread_mutex_unlock(&sh->dv_mutex);
-		MLX5_ASSERT(!ret);
-		(void)ret;
-	}
-}
-
 /* Update VLAN's VID/PCP based on input rte_flow_action.
  *
  * @param[in] action
@@ -2786,21 +2747,27 @@ flow_dv_validate_action_raw_encap_decap
 /**
  * Match encap_decap resource.
  *
+ * @param list
+ *   Pointer to the hash list.
  * @param entry
  *   Pointer to the existing resource entry object.
- * @param ctx
+ * @param key
+ *   Key of the new entry.
+ * @param cb_ctx
  *   Pointer to the context with the new encap_decap resource.
  *
  * @return
- *   0 on matching, -1 otherwise.
+ *   0 on matching, non-zero otherwise.
  */
-static int
-flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
+int
+flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
+			     struct mlx5_hlist_entry *entry,
+			     uint64_t key __rte_unused, void *cb_ctx)
 {
-	struct mlx5_flow_dv_encap_decap_resource *resource;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 
-	resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx;
 	cache_resource = container_of(entry,
				      struct mlx5_flow_dv_encap_decap_resource,
				      entry);
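This match/create split is the template the rest of the patch follows: the open-coded lookup, refcount and insert logic of every resource moves behind mlx5_hlist_register() (and mlx5_cache_register() for the list-backed resources), which probes existing entries with the match callback and invokes the create callback only on a miss, taking the reference internally. A minimal sketch of that lookup-or-create contract (single list, no locking; the registry/reg_entry names are illustrative, not part of the mlx5 code):

    #include <stddef.h>
    #include <stdint.h>

    struct reg_entry {
        struct reg_entry *next;
        uint64_t key;
        uint32_t ref_cnt;
    };

    /* Return 0 when @entry matches the resource described by @ctx. */
    typedef int (*match_cb_t)(struct reg_entry *entry, uint64_t key, void *ctx);
    /* Allocate and initialize a new entry, NULL on failure. */
    typedef struct reg_entry *(*create_cb_t)(uint64_t key, void *ctx);

    struct registry {
        struct reg_entry *head;
        match_cb_t match;
        create_cb_t create;
    };

    static struct reg_entry *
    registry_register(struct registry *r, uint64_t key, void *ctx)
    {
        struct reg_entry *e;

        /* Reuse a matching entry and take a reference on it... */
        for (e = r->head; e != NULL; e = e->next) {
            if (e->key == key && r->match(e, key, ctx) == 0) {
                e->ref_cnt++;
                return e;
            }
        }
        /* ...or create the backing object exactly once on a miss. */
        e = r->create(key, ctx);
        if (e == NULL)
            return NULL;
        e->key = key;
        e->ref_cnt = 1;
        e->next = r->head;
        r->head = e;
        return e;
    }

Release is the mirror image: mlx5_hlist_unregister() drops the reference and calls the remove callback (which destroys the hardware object) only when the count reaches zero; that is why the per-resource refcnt and DRV_LOG bookkeeping disappears throughout this patch.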
@@ -2816,6 +2783,63 @@ flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
 	return -1;
 }
 
+/**
+ * Allocate encap_decap resource.
+ *
+ * @param list
+ *   Pointer to the hash list.
+ * @param key
+ *   Key of the new entry.
+ * @param cb_ctx
+ *   Pointer to the context with the new encap_decap resource.
+ *
+ * @return
+ *   Pointer to the created entry on success, NULL otherwise and
+ *   rte_errno is set.
+ */
+struct mlx5_hlist_entry *
+flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
+			      uint64_t key __rte_unused,
+			      void *cb_ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5dv_dr_domain *domain;
+	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
+	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+	uint32_t idx;
+	int ret;
+
+	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+		domain = sh->fdb_domain;
+	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+		domain = sh->rx_domain;
+	else
+		domain = sh->tx_domain;
+	/* Register new encap/decap resource. */
+	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+					    &idx);
+	if (!cache_resource) {
+		rte_flow_error_set(ctx->error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "cannot allocate resource memory");
+		return NULL;
+	}
+	*cache_resource = *resource;
+	cache_resource->idx = idx;
+	ret = mlx5_flow_os_create_flow_action_packet_reformat
+					(sh->ctx, domain, cache_resource,
+					 &cache_resource->action);
+	if (ret) {
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
+		rte_flow_error_set(ctx->error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "cannot create action");
+		return NULL;
+	}
+
+	return &cache_resource->entry;
+}
+
 /**
  * Find existing encap/decap resource or create and register a new one.
  *
@@ -2840,8 +2864,6 @@ flow_dv_encap_decap_resource_register
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
-	struct mlx5dv_dr_domain *domain;
 	struct mlx5_hlist_entry *entry;
 	union mlx5_flow_encap_decap_key encap_decap_key = {
 		{
@@ -2852,68 +2874,22 @@ flow_dv_encap_decap_resource_register
 			.cksum = 0,
 		}
 	};
-	int ret;
+	struct mlx5_flow_cb_ctx ctx = {
+		.error = error,
+		.data = resource,
+	};
 
 	resource->flags = dev_flow->dv.group ? 0 : 1;
-	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
-		domain = sh->fdb_domain;
-	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
-		domain = sh->rx_domain;
-	else
-		domain = sh->tx_domain;
 	encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
						resource->size, 0);
 	resource->entry.key = encap_decap_key.v64;
-	/* Lookup a matching resource from cache. */
-	entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key,
-				     flow_dv_encap_decap_resource_match,
-				     (void *)resource);
-	if (entry) {
-		cache_resource = container_of(entry,
-			struct mlx5_flow_dv_encap_decap_resource, entry);
-		DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
-			(void *)cache_resource,
-			rte_atomic32_read(&cache_resource->refcnt));
-		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
-		dev_flow->dv.encap_decap = cache_resource;
-		return 0;
-	}
-	/* Register new encap/decap resource. 
*/ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], - &dev_flow->handle->dvh.rix_encap_decap); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap; - ret = mlx5_flow_os_create_flow_action_packet_reformat - (sh->ctx, domain, cache_resource, - &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry, - flow_dv_encap_decap_resource_match, - (void *)cache_resource)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], - cache_resource->idx); - return rte_flow_error_set(error, EEXIST, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "action exist"); - } - dev_flow->dv.encap_decap = cache_resource; - DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key, + &ctx); + if (!entry) + return -rte_errno; + resource = container_of(entry, typeof(*resource), entry); + dev_flow->dv.encap_decap = resource; + dev_flow->handle->dvh.rix_encap_decap = resource->idx; return 0; } @@ -2937,70 +2913,62 @@ flow_dv_jump_tbl_resource_register (struct rte_eth_dev *dev __rte_unused, struct mlx5_flow_tbl_resource *tbl, struct mlx5_flow *dev_flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct mlx5_flow_tbl_data_entry *tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); - int cnt, ret; MLX5_ASSERT(tbl); - cnt = rte_atomic32_read(&tbl_data->jump.refcnt); - if (!cnt) { - ret = mlx5_flow_os_create_flow_action_dest_flow_tbl - (tbl->obj, &tbl_data->jump.action); - if (ret) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create jump action"); - DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++", - (void *)&tbl_data->jump, cnt); - } else { - /* old jump should not make the table ref++. */ - flow_dv_tbl_resource_release(dev, &tbl_data->tbl); - MLX5_ASSERT(tbl_data->jump.action); - DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++", - (void *)&tbl_data->jump, cnt); - } - rte_atomic32_inc(&tbl_data->jump.refcnt); + MLX5_ASSERT(tbl_data->jump.action); dev_flow->handle->rix_jump = tbl_data->idx; dev_flow->dv.jump = &tbl_data->jump; return 0; } -/** - * Find existing default miss resource or create and register a new one. - * - * @param[in, out] dev - * Pointer to rte_eth_dev structure. - * @param[out] error - * pointer to error structure. - * - * @return - * 0 on success otherwise -errno and errno is set. 
- */ -static int -flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, - struct rte_flow_error *error) +int +flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_default_miss_resource *cache_resource = - &sh->default_miss; - int cnt = rte_atomic32_read(&cache_resource->refcnt); - - if (!cnt) { - MLX5_ASSERT(cache_resource->action); - cache_resource->action = - mlx5_glue->dr_create_flow_action_default_miss(); - if (!cache_resource->action) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot create default miss action"); - DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++", - (void *)cache_resource->action, cnt); - } - rte_atomic32_inc(&cache_resource->refcnt); - return 0; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; + struct mlx5_flow_dv_port_id_action_resource *res = + container_of(entry, typeof(*res), entry); + + return ref->port_id != res->port_id; +} + +struct mlx5_cache_entry * +flow_dv_port_id_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; + struct mlx5_flow_dv_port_id_action_resource *cache; + uint32_t idx; + int ret; + + /* Register new port id action resource. */ + cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate port_id action cache memory"); + return NULL; + } + *cache = *ref; + ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain, + ref->port_id, + &cache->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create action"); + return NULL; + } + return &cache->entry; } /** @@ -3026,52 +2994,72 @@ flow_dv_port_id_action_resource_register struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_port_id_action_resource *cache_resource; - uint32_t idx = 0; + struct mlx5_cache_entry *entry; + struct mlx5_flow_dv_port_id_action_resource *cache; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; + + entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx); + if (!entry) + return -rte_errno; + cache = container_of(entry, typeof(*cache), entry); + dev_flow->dv.port_id_action = cache; + dev_flow->handle->rix_port_id_action = cache->idx; + return 0; +} + +int +flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; + struct mlx5_flow_dv_push_vlan_action_resource *res = + container_of(entry, typeof(*res), entry); + + return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type; +} + +struct mlx5_cache_entry * +flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct 
mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; + struct mlx5_flow_dv_push_vlan_action_resource *cache; + struct mlx5dv_dr_domain *domain; + uint32_t idx; int ret; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list, - idx, cache_resource, next) { - if (resource->port_id == cache_resource->port_id) { - DRV_LOG(DEBUG, "port id action resource resource %p: " - "refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->rix_port_id_action = idx; - dev_flow->dv.port_id_action = cache_resource; - return 0; - } - } /* Register new port id action resource. */ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], - &dev_flow->handle->rix_port_id_action); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - ret = mlx5_flow_os_create_flow_action_dest_port - (priv->sh->fdb_domain, resource->port_id, - &cache_resource->action); + cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate push_vlan action cache memory"); + return NULL; + } + *cache = *ref; + if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + domain = sh->fdb_domain; + else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) + domain = sh->rx_domain; + else + domain = sh->tx_domain; + ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag, + &cache->action); if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list, - dev_flow->handle->rix_port_id_action, cache_resource, - next); - dev_flow->dv.port_id_action = cache_resource; - DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - return 0; + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create push vlan action"); + return NULL; + } + return &cache->entry; } /** @@ -3097,62 +3085,23 @@ flow_dv_push_vlan_action_resource_register struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; - struct mlx5dv_dr_domain *domain; - uint32_t idx = 0; - int ret; + struct mlx5_flow_dv_push_vlan_action_resource *cache; + struct mlx5_cache_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; - /* Lookup a matching resource from cache. 
*/
-	ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
-		      sh->push_vlan_action_list, idx, cache_resource, next) {
-		if (resource->vlan_tag == cache_resource->vlan_tag &&
-		    resource->ft_type == cache_resource->ft_type) {
-			DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
-				"refcnt %d++",
-				(void *)cache_resource,
-				rte_atomic32_read(&cache_resource->refcnt));
-			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle->dvh.rix_push_vlan = idx;
-			dev_flow->dv.push_vlan_res = cache_resource;
-			return 0;
-		}
-	}
-	/* Register new push_vlan action resource. */
-	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
-					    &dev_flow->handle->dvh.rix_push_vlan);
-	if (!cache_resource)
-		return rte_flow_error_set(error, ENOMEM,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					  "cannot allocate resource memory");
-	*cache_resource = *resource;
-	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
-		domain = sh->fdb_domain;
-	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
-		domain = sh->rx_domain;
-	else
-		domain = sh->tx_domain;
-	ret = mlx5_flow_os_create_flow_action_push_vlan
-					(domain, resource->vlan_tag,
-					 &cache_resource->action);
-	if (ret) {
-		mlx5_free(cache_resource);
-		return rte_flow_error_set(error, ENOMEM,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					  NULL, "cannot create action");
-	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
-	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
-		     &sh->push_vlan_action_list,
-		     dev_flow->handle->dvh.rix_push_vlan,
-		     cache_resource, next);
-	dev_flow->dv.push_vlan_res = cache_resource;
-	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
-		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+	entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
+	if (!entry)
+		return -rte_errno;
+	cache = container_of(entry, typeof(*cache), entry);
+
+	dev_flow->handle->dvh.rix_push_vlan = cache->idx;
+	dev_flow->dv.push_vlan_res = cache;
 	return 0;
 }
 
+
 /**
  * Get the size of specific rte_flow_item_type hdr
  *
@@ -3986,7 +3935,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
 	target_group =
		((const struct rte_flow_action_jump *)action->conf)->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
		return ret;
 	if (attributes->group == target_group &&
@@ -4180,7 +4129,8 @@ flow_dv_validate_action_age(uint64_t action_flags,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_age *age = action->conf;
 
-	if (!priv->config.devx || priv->sh->cmng.counter_fallback)
+	if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
+	    !priv->sh->aso_age_mng))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
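The modify-header rework in the next hunk drops the field-by-field comparison for a single memcmp() over a key window: every byte from ft_type up to the end of the used part of the actions[] array is treated as one contiguous key, and the register path checksums exactly the same span into the hash key. A compact illustration of the technique on an invented struct (hypothetical layout; it relies on the window members being contiguous and on padding being zeroed, which the MLX5_MEM_ZERO allocations guarantee):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct hdr_res {
        void *action;        /* Bookkeeping, excluded from the key. */
        uint32_t ft_type;    /* Key window starts here... */
        uint32_t actions_num;
        uint64_t actions[];  /* ...and ends after actions_num entries. */
    };

    /* 0 when the two resources carry an identical key window. */
    static int
    hdr_res_match(const struct hdr_res *a, const struct hdr_res *b)
    {
        size_t key_len = sizeof(*a) - offsetof(struct hdr_res, ft_type) +
                         a->actions_num * sizeof(a->actions[0]);

        return a->actions_num != b->actions_num ||
               memcmp(&a->ft_type, &b->ft_type, key_len) != 0;
    }

Deriving both the hash key and the comparison from the same byte range means the two can never disagree about what constitutes resource identity.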
@@ -4271,35 +4221,75 @@ flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
 /**
  * Match modify-header resource.
  *
+ * @param list
+ *   Pointer to the hash list.
  * @param entry
  *   Pointer to the existing resource entry object.
+ * @param key
+ *   Key of the new entry.
  * @param cb_ctx
  *   Pointer to the context with the new modify-header resource.
  *
  * @return
- *   0 on matching, -1 otherwise.
+ *   0 on matching, non-zero otherwise.
  */
-static int
-flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
+int
+flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
+			struct mlx5_hlist_entry *entry,
+			uint64_t key __rte_unused, void *cb_ctx)
 {
-	struct mlx5_flow_dv_modify_hdr_resource *resource;
-	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
-	uint32_t actions_len;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+	struct mlx5_flow_dv_modify_hdr_resource *resource =
+			container_of(entry, typeof(*resource), entry);
+	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
 
-	resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx;
-	cache_resource = container_of(entry,
-				      struct mlx5_flow_dv_modify_hdr_resource,
-				      entry);
-	actions_len = resource->actions_num * sizeof(resource->actions[0]);
-	if (resource->entry.key == cache_resource->entry.key &&
-	    resource->ft_type == cache_resource->ft_type &&
-	    resource->actions_num == cache_resource->actions_num &&
-	    resource->flags == cache_resource->flags &&
-	    !memcmp((const void *)resource->actions,
-		    (const void *)cache_resource->actions,
-		    actions_len))
-		return 0;
-	return -1;
+	key_len += ref->actions_num * sizeof(ref->actions[0]);
+	return ref->actions_num != resource->actions_num ||
+	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
+}
+
+struct mlx5_hlist_entry *
+flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
+			 void *cb_ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5dv_dr_domain *ns;
+	struct mlx5_flow_dv_modify_hdr_resource *entry;
+	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+	int ret;
+	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
+	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
+
+	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
+			    SOCKET_ID_ANY);
+	if (!entry) {
+		rte_flow_error_set(ctx->error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "cannot allocate resource memory");
+		return NULL;
+	}
+	rte_memcpy(&entry->ft_type,
+		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
+		   key_len + data_len);
+	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+		ns = sh->fdb_domain;
+	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+		ns = sh->tx_domain;
+	else
+		ns = sh->rx_domain;
+	ret = mlx5_flow_os_create_flow_action_modify_header
+					(sh->ctx, ns, entry,
+					 data_len, &entry->action);
+	if (ret) {
+		mlx5_free(entry);
+		rte_flow_error_set(ctx->error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "cannot create modification action");
+		return NULL;
+	}
+	return &entry->entry;
 }
 
 /**
@@ -4510,19 +4500,14 @@ flow_dv_modify_hdr_resource_register
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
-	struct mlx5dv_dr_domain *ns;
-	uint32_t actions_len;
+	uint32_t key_len = sizeof(*resource) -
+			   offsetof(typeof(*resource), ft_type) +
+			   resource->actions_num * sizeof(resource->actions[0]);
 	struct mlx5_hlist_entry *entry;
-	union mlx5_flow_modify_hdr_key hdr_mod_key = {
-		{
-			.ft_type = resource->ft_type,
-			.actions_num = resource->actions_num,
-			.group = dev_flow->dv.group,
-			.cksum = 0,
-		}
+	struct mlx5_flow_cb_ctx ctx = {
+		.error = error,
+		.data = resource,
 	};
-	int ret;
 
 	resource->flags = dev_flow->dv.group ? 
0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; @@ -4531,66 +4516,12 @@ flow_dv_modify_hdr_resource_register return rte_flow_error_set(error, EOVERFLOW, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many modify header items"); - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - ns = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) - ns = sh->tx_domain; - else - ns = sh->rx_domain; - /* Lookup a matching resource from cache. */ - actions_len = resource->actions_num * sizeof(resource->actions[0]); - hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0); - resource->entry.key = hdr_mod_key.v64; - entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key, - flow_dv_modify_hdr_resource_match, - (void *)resource); - if (entry) { - cache_resource = container_of(entry, - struct mlx5_flow_dv_modify_hdr_resource, - entry); - DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.modify_hdr = cache_resource; - return 0; - - } - /* Register new modify-header resource. */ - cache_resource = mlx5_malloc(MLX5_MEM_ZERO, - sizeof(*cache_resource) + actions_len, 0, - SOCKET_ID_ANY); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - rte_memcpy(cache_resource->actions, resource->actions, actions_len); - ret = mlx5_flow_os_create_flow_action_modify_header - (sh->ctx, ns, cache_resource, - actions_len, &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry, - flow_dv_modify_hdr_resource_match, - (void *)cache_resource)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - mlx5_free(cache_resource); - return rte_flow_error_set(error, EEXIST, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "action exist"); - } - dev_flow->handle->dvh.modify_hdr = cache_resource; - DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0); + entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx); + if (!entry) + return -rte_errno; + resource = container_of(entry, typeof(*resource), entry); + dev_flow->handle->dvh.modify_hdr = resource; return 0; } @@ -5108,7 +5039,7 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, * Index to the counter handler. 
*/ static void -flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) +flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; @@ -5172,7 +5103,7 @@ static int flow_dv_validate_attributes(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, const struct rte_flow_attr *attributes, - struct flow_grp_info grp_info, + const struct flow_grp_info *grp_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -5327,7 +5258,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate (dev, tunnel, attr, items, actions); - ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error); + ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error); if (ret < 0) return ret; is_root = (uint64_t)ret; @@ -5997,6 +5928,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, /* Meter action will add one more TAG action. */ rw_act_num += MLX5_ACT_NUM_SET_TAG; break; + case MLX5_RTE_FLOW_ACTION_TYPE_AGE: + if (!attr->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Shared ASO age action is not supported for group 0"); + action_flags |= MLX5_FLOW_ACTION_AGE; + ++actions_n; + break; case RTE_FLOW_ACTION_TYPE_AGE: ret = flow_dv_validate_action_age(action_flags, actions, dev, @@ -6248,9 +6188,11 @@ flow_dv_prepare(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); /* In case of corrupting the memory. */ - if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not free temporary device flow"); @@ -6264,8 +6206,8 @@ flow_dv_prepare(struct rte_eth_dev *dev, "not enough memory to create flow handle"); return NULL; } - /* No multi-thread supporting. */ - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; /* @@ -7868,6 +7810,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_item_ecpri *ecpri_m = item->mask; const struct rte_flow_item_ecpri *ecpri_v = item->spec; + struct rte_ecpri_common_hdr common; void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_4); void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4); @@ -7908,7 +7851,8 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, * Some wildcard rules only matching type field should be supported. 
*/
 	if (ecpri_m->hdr.dummy[0]) {
-		switch (ecpri_v->hdr.common.type) {
+		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
+		switch (common.type) {
 		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
 		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
 		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
@@ -7972,8 +7916,76 @@ flow_dv_matcher_enable(uint32_t *match_criteria)
 	return match_criteria_enable;
 }
 
-
-/**
+struct mlx5_hlist_entry *
+flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct rte_eth_dev *dev = ctx->dev;
+	struct mlx5_flow_tbl_data_entry *tbl_data;
+	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
+	struct rte_flow_error *error = ctx->error;
+	union mlx5_flow_tbl_key key = { .v64 = key64 };
+	struct mlx5_flow_tbl_resource *tbl;
+	void *domain;
+	uint32_t idx = 0;
+	int ret;
+
+	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
+	if (!tbl_data) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot allocate flow table data entry");
+		return NULL;
+	}
+	tbl_data->idx = idx;
+	tbl_data->tunnel = tt_prm->tunnel;
+	tbl_data->group_id = tt_prm->group_id;
+	tbl_data->external = tt_prm->external;
+	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
+	tbl_data->is_egress = !!key.direction;
+	tbl = &tbl_data->tbl;
+	if (key.dummy)
+		return &tbl_data->entry;
+	if (key.domain)
+		domain = sh->fdb_domain;
+	else if (key.direction)
+		domain = sh->tx_domain;
+	else
+		domain = sh->rx_domain;
+	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
+	if (ret) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "cannot create flow table object");
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+		return NULL;
+	}
+	if (key.table_id) {
+		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+					(tbl->obj, &tbl_data->jump.action);
+		if (ret) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					   NULL,
+					   "cannot create flow jump action");
+			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
+			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+			return NULL;
+		}
+	}
+	MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
+	      key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
+	      key.table_id);
+	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
+			     flow_dv_matcher_create_cb,
+			     flow_dv_matcher_match_cb,
+			     flow_dv_matcher_remove_cb);
+	return &tbl_data->entry;
+}
+
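flow_dv_tbl_create_cb() receives the whole table identity as one 64-bit scalar: table id, dummy flag, domain and direction are packed into union mlx5_flow_tbl_key, so the generic hash list can hash and compare flow tables without a dedicated key object. A sketch of the packing idea (the field widths here are illustrative; the real union is defined in mlx5_flow.h):

    #include <stdint.h>

    /* Illustrative only: the actual field widths live in mlx5_flow.h. */
    union tbl_key {
        struct {
            uint32_t table_id;  /* Flow table number. */
            uint16_t dummy;     /* 1: data entry only, no HW table. */
            uint8_t  domain;    /* 1: FDB (transfer) domain. */
            uint8_t  direction; /* 1: egress, 0: ingress. */
        };
        uint64_t v64;           /* Scalar used as the hash key. */
    };

    /* Build the hash key exactly once per lookup/registration. */
    static inline uint64_t
    tbl_key_make(uint32_t table_id, int egress, int transfer, int dummy)
    {
        union tbl_key key = {
            {
                .table_id = table_id,
                .dummy = !!dummy,
                .domain = !!transfer,
                .direction = !!egress,
            }
        };

        return key.v64;
    }

flow_dv_tbl_resource_get() below builds exactly such a key; the dummy bit makes the create callback return a bare data entry without instantiating the underlying flow table object.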
+/**
  * Get a flow table.
  *
  * @param[in, out] dev
@@ -7984,99 +7996,107 @@ flow_dv_matcher_enable(uint32_t *match_criteria)
  *   Direction of the table.
  * @param[in] transfer
  *   E-Switch or NIC flow.
+ * @param[in] dummy
+ *   Dummy entry for dv API.
  * @param[out] error
  *   pointer to error structure.
  *
  * @return
  *   Returns the table resource based on the index, NULL in case of failure.
 */
-static struct mlx5_flow_tbl_resource *
+struct mlx5_flow_tbl_resource *
 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 bool external,
			 const struct mlx5_flow_tunnel *tunnel,
-			 uint32_t group_id,
+			 uint32_t group_id, uint8_t dummy,
			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_flow_tbl_resource *tbl;
 	union mlx5_flow_tbl_key table_key = {
		{
			.table_id = table_id,
-			.reserved = 0,
+			.dummy = dummy,
			.domain = !!transfer,
			.direction = !!egress,
		}
 	};
-	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
-							 table_key.v64);
+	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
+		.tunnel = tunnel,
+		.group_id = group_id,
+		.external = external,
+	};
+	struct mlx5_flow_cb_ctx ctx = {
+		.dev = dev,
+		.error = error,
+		.data = &tt_prm,
+	};
+	struct mlx5_hlist_entry *entry;
 	struct mlx5_flow_tbl_data_entry *tbl_data;
-	uint32_t idx = 0;
-	int ret;
-	void *domain;
 
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		tbl = &tbl_data->tbl;
-		rte_atomic32_inc(&tbl->refcnt);
-		return tbl;
-	}
-	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
-	if (!tbl_data) {
+	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
+	if (!entry) {
 		rte_flow_error_set(error, ENOMEM,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				   NULL,
-				   "cannot allocate flow table data entry");
-		return NULL;
-	}
-	tbl_data->idx = idx;
-	tbl_data->tunnel = tunnel;
-	tbl_data->group_id = group_id;
-	tbl_data->external = external;
-	tbl = &tbl_data->tbl;
-	pos = &tbl_data->entry;
-	if (transfer)
-		domain = sh->fdb_domain;
-	else if (egress)
-		domain = sh->tx_domain;
-	else
-		domain = sh->rx_domain;
-	ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
-	if (ret) {
-		rte_flow_error_set(error, ENOMEM,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				   NULL, "cannot create flow table object");
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "cannot get table");
 		return NULL;
 	}
-	/*
-	 * No multi-threads now, but still better to initialize the reference
-	 * count before insert it into the hash list.
-	 */
-	rte_atomic32_init(&tbl->refcnt);
-	/* Jump action reference count is initialized here. */
-	rte_atomic32_init(&tbl_data->jump.refcnt);
-	pos->key = table_key.v64;
-	ret = mlx5_hlist_insert(sh->flow_tbls, pos);
-	if (ret < 0) {
-		rte_flow_error_set(error, -ret,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "cannot insert flow table data entry");
-		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+		table_id, tunnel ? 
tunnel->tunnel_id : 0, group_id); + tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + return &tbl_data->tbl; +} + +void +flow_dv_tbl_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_tbl_data_entry *tbl_data = + container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + + MLX5_ASSERT(entry && sh); + if (tbl_data->jump.action) + mlx5_flow_os_destroy_flow_action(tbl_data->jump.action); + if (tbl_data->tbl.obj) + mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj); + if (tbl_data->tunnel_offload && tbl_data->external) { + struct mlx5_hlist_entry *he; + struct mlx5_hlist *tunnel_grp_hash; + struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub; + union tunnel_tbl_key tunnel_key = { + .tunnel_id = tbl_data->tunnel ? + tbl_data->tunnel->tunnel_id : 0, + .group = tbl_data->group_id + }; + union mlx5_flow_tbl_key table_key = { + .v64 = entry->key + }; + uint32_t table_id = table_key.table_id; + + tunnel_grp_hash = tbl_data->tunnel ? + tbl_data->tunnel->groups : + thub->groups; + he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL); + if (he) + mlx5_hlist_unregister(tunnel_grp_hash, he); + DRV_LOG(DEBUG, + "Table_id %u tunnel %u group %u released.", + table_id, + tbl_data->tunnel ? + tbl_data->tunnel->tunnel_id : 0, + tbl_data->group_id); } - rte_atomic32_inc(&tbl->refcnt); - return tbl; + mlx5_cache_list_destroy(&tbl_data->matchers); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); } /** * Release a flow table. * - * @param[in] dev - * Pointer to rte_eth_dev structure. + * @param[in] sh + * Pointer to device shared structure. * @param[in] tbl * Table resource to be released. * @@ -8084,63 +8104,72 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, * Returns 0 if table was released, else return 1; */ static int -flow_dv_tbl_resource_release(struct rte_eth_dev *dev, +flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, struct mlx5_flow_tbl_resource *tbl) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_tbl_data_entry *tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); if (!tbl) return 0; - if (rte_atomic32_dec_and_test(&tbl->refcnt)) { - struct mlx5_hlist_entry *pos = &tbl_data->entry; - - mlx5_flow_os_destroy_flow_tbl(tbl->obj); - tbl->obj = NULL; - if (is_tunnel_offload_active(dev) && tbl_data->external) { - struct mlx5_hlist_entry *he; - struct mlx5_hlist *tunnel_grp_hash; - struct mlx5_flow_tunnel_hub *thub = - mlx5_tunnel_hub(dev); - union tunnel_tbl_key tunnel_key = { - .tunnel_id = tbl_data->tunnel ? - tbl_data->tunnel->tunnel_id : 0, - .group = tbl_data->group_id - }; - union mlx5_flow_tbl_key table_key = { - .v64 = pos->key - }; - uint32_t table_id = table_key.table_id; - - tunnel_grp_hash = tbl_data->tunnel ? - tbl_data->tunnel->groups : - thub->groups; - he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val); - if (he) { - struct tunnel_tbl_entry *tte; - tte = container_of(he, typeof(*tte), hash); - MLX5_ASSERT(tte->flow_table == table_id); - mlx5_hlist_remove(tunnel_grp_hash, he); - mlx5_free(tte); - } - mlx5_flow_id_release(mlx5_tunnel_hub(dev)->table_ids, - tunnel_flow_tbl_to_id(table_id)); - DRV_LOG(DEBUG, - "port %u release table_id %#x tunnel %u group %u", - dev->data->port_id, table_id, - tbl_data->tunnel ? - tbl_data->tunnel->tunnel_id : 0, - tbl_data->group_id); - } - /* remove the entry from the hash list and free memory. 
*/ - mlx5_hlist_remove(sh->flow_tbls, pos); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], - tbl_data->idx); - return 0; + return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry); +} + +int +flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_matcher *ref = ctx->data; + struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur), + entry); + + return cur->crc != ref->crc || + cur->priority != ref->priority || + memcmp((const void *)cur->mask.buf, + (const void *)ref->mask.buf, ref->mask.size); +} + +struct mlx5_cache_entry * +flow_dv_matcher_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_matcher *ref = ctx->data; + struct mlx5_flow_dv_matcher *cache; + struct mlx5dv_flow_matcher_attr dv_attr = { + .type = IBV_FLOW_ATTR_NORMAL, + .match_mask = (void *)&ref->mask, + }; + struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl, + typeof(*tbl), tbl); + int ret; + + cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create matcher"); + return NULL; } - return 1; + *cache = *ref; + dv_attr.match_criteria_enable = + flow_dv_matcher_enable(cache->mask.buf); + dv_attr.priority = ref->priority; + if (tbl->is_egress) + dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; + ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj, + &cache->matcher_object); + if (ret) { + mlx5_free(cache); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create matcher"); + return NULL; + } + return &cache->entry; } /** @@ -8162,90 +8191,75 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev, */ static int flow_dv_matcher_register(struct rte_eth_dev *dev, - struct mlx5_flow_dv_matcher *matcher, + struct mlx5_flow_dv_matcher *ref, union mlx5_flow_tbl_key *key, struct mlx5_flow *dev_flow, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_matcher *cache_matcher; - struct mlx5dv_flow_matcher_attr dv_attr = { - .type = IBV_FLOW_ATTR_NORMAL, - .match_mask = (void *)&matcher->mask, - }; + struct mlx5_cache_entry *entry; + struct mlx5_flow_dv_matcher *cache; struct mlx5_flow_tbl_resource *tbl; struct mlx5_flow_tbl_data_entry *tbl_data; - int ret; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = ref, + }; - tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, - key->domain, false, NULL, 0, error); + /** + * tunnel offload API requires this registration for cases when + * tunnel match rule was inserted before tunnel set rule. + */ + tbl = flow_dv_tbl_resource_get(dev, key->table_id, + key->direction, key->domain, + dev_flow->external, tunnel, + group_id, 0, error); if (!tbl) return -rte_errno; /* No need to refill the error info */ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); - /* Lookup from cache. 
*/ - LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) { - if (matcher->crc == cache_matcher->crc && - matcher->priority == cache_matcher->priority && - !memcmp((const void *)matcher->mask.buf, - (const void *)cache_matcher->mask.buf, - cache_matcher->mask.size)) { - DRV_LOG(DEBUG, - "%s group %u priority %hd use %s " - "matcher %p: refcnt %d++", - key->domain ? "FDB" : "NIC", key->table_id, - cache_matcher->priority, - key->direction ? "tx" : "rx", - (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); - rte_atomic32_inc(&cache_matcher->refcnt); - dev_flow->handle->dvh.matcher = cache_matcher; - /* old matcher should not make the table ref++. */ - flow_dv_tbl_resource_release(dev, tbl); - return 0; - } - } - /* Register new matcher. */ - cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0, - SOCKET_ID_ANY); - if (!cache_matcher) { - flow_dv_tbl_resource_release(dev, tbl); + ref->tbl = tbl; + entry = mlx5_cache_register(&tbl_data->matchers, &ctx); + if (!entry) { + flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate matcher memory"); + "cannot allocate ref memory"); } - *cache_matcher = *matcher; - dv_attr.match_criteria_enable = - flow_dv_matcher_enable(cache_matcher->mask.buf); - dv_attr.priority = matcher->priority; - if (key->direction) - dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; - ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj, - &cache_matcher->matcher_object); - if (ret) { - mlx5_free(cache_matcher); -#ifdef HAVE_MLX5DV_DR - flow_dv_tbl_resource_release(dev, tbl); -#endif - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create matcher"); - } - /* Save the table information */ - cache_matcher->tbl = tbl; - rte_atomic32_init(&cache_matcher->refcnt); - /* only matcher ref++, table ref++ already done above in get API. */ - rte_atomic32_inc(&cache_matcher->refcnt); - LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); - dev_flow->handle->dvh.matcher = cache_matcher; - DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", - key->domain ? "FDB" : "NIC", key->table_id, - cache_matcher->priority, - key->direction ? "tx" : "rx", (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); + cache = container_of(entry, typeof(*cache), entry); + dev_flow->handle->dvh.matcher = cache; return 0; } +struct mlx5_hlist_entry * +flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct rte_flow_error *error = ctx; + struct mlx5_flow_dv_tag_resource *entry; + uint32_t idx = 0; + int ret; + + entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx); + if (!entry) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + return NULL; + } + entry->idx = idx; + ret = mlx5_flow_os_create_flow_action_tag(key, + &entry->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx); + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + return NULL; + } + return &entry->entry; +} + /** * Find existing tag resource or create and register a new one. 
* @@ -8269,54 +8283,32 @@ flow_dv_tag_resource_register struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_tag_resource *cache_resource; struct mlx5_hlist_entry *entry; - int ret; - /* Lookup a matching resource from cache. */ - entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24); + entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error); if (entry) { cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); - rte_atomic32_inc(&cache_resource->refcnt); dev_flow->handle->dvh.rix_tag = cache_resource->idx; dev_flow->dv.tag_resource = cache_resource; - DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); return 0; } - /* Register new resource. */ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], - &dev_flow->handle->dvh.rix_tag); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - cache_resource->entry.key = (uint64_t)tag_be24; - ret = mlx5_flow_os_create_flow_action_tag(tag_be24, - &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { - mlx5_flow_os_destroy_flow_action(cache_resource->action); - mlx5_free(cache_resource); - return rte_flow_error_set(error, EEXIST, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot insert tag"); - } - dev_flow->dv.tag_resource = cache_resource; - DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - return 0; + return -rte_errno; +} + +void +flow_dv_tag_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_dv_tag_resource *tag = + container_of(entry, struct mlx5_flow_dv_tag_resource, entry); + + MLX5_ASSERT(tag && sh && tag->action); + claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); + DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx); } /** @@ -8335,24 +8327,14 @@ flow_dv_tag_release(struct rte_eth_dev *dev, uint32_t tag_idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_tag_resource *tag; tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); if (!tag) return 0; DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", - dev->data->port_id, (void *)tag, - rte_atomic32_read(&tag->refcnt)); - if (rte_atomic32_dec_and_test(&tag->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); - mlx5_hlist_remove(sh->tag_table, &tag->entry); - DRV_LOG(DEBUG, "port %u tag %p: removed", - dev->data->port_id, (void *)tag); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); - return 0; - } - return 1; + dev->data->port_id, (void *)tag, tag->entry.ref_cnt); + return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry); } /** @@ -8442,6 +8424,7 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev, __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED); return counter; } + /** * Add Tx queue matcher * @@ -8559,7 +8542,7 @@ 
flow_dv_hashfields_set(struct mlx5_flow *dev_flow, } /** - * Create an Rx Hash queue. + * Prepare an Rx Hash queue. * * @param dev * Pointer to Ethernet device. @@ -8574,61 +8557,101 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow, * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. */ static struct mlx5_hrxq * -flow_dv_handle_rx_queue(struct rte_eth_dev *dev, - struct mlx5_flow *dev_flow, - struct mlx5_flow_rss_desc *rss_desc, - uint32_t *hrxq_idx) +flow_dv_hrxq_prepare(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + struct mlx5_flow_rss_desc *rss_desc, + uint32_t *hrxq_idx) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *dh = dev_flow->handle; struct mlx5_hrxq *hrxq; MLX5_ASSERT(rss_desc->queue_num); - *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, rss_desc->queue_num); - if (!*hrxq_idx) { - *hrxq_idx = mlx5_hrxq_new - (dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, rss_desc->queue_num, - !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL), - false); - if (!*hrxq_idx) - return NULL; - } + rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc->hash_fields = dev_flow->hash_fields; + rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL); + rss_desc->shared_rss = 0; + *hrxq_idx = mlx5_hrxq_get(dev, rss_desc); + if (!*hrxq_idx) + return NULL; hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], *hrxq_idx); return hrxq; } /** - * Find existing sample resource or create and register a new one. + * Release sample sub action resource. * * @param[in, out] dev * Pointer to rte_eth_dev structure. - * @param[in] attr - * Attributes of flow that includes this item. - * @param[in] resource - * Pointer to sample resource. - * @parm[in, out] dev_flow - * Pointer to the dev_flow. - * @param[in, out] sample_dv_actions - * Pointer to sample actions list. - * @param[out] error - * pointer to error structure. - * - * @return - * 0 on success otherwise -errno and errno is set. + * @param[in] act_res + * Pointer to sample sub action resource. 
*/ -static int -flow_dv_sample_resource_register(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - struct mlx5_flow_dv_sample_resource *resource, - struct mlx5_flow *dev_flow, - void **sample_dv_actions, - struct rte_flow_error *error) +static void +flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev, + struct mlx5_flow_sub_actions_idx *act_res) { + if (act_res->rix_hrxq) { + mlx5_hrxq_release(dev, act_res->rix_hrxq); + act_res->rix_hrxq = 0; + } + if (act_res->rix_encap_decap) { + flow_dv_encap_decap_resource_release(dev, + act_res->rix_encap_decap); + act_res->rix_encap_decap = 0; + } + if (act_res->rix_port_id_action) { + flow_dv_port_id_action_resource_release(dev, + act_res->rix_port_id_action); + act_res->rix_port_id_action = 0; + } + if (act_res->rix_tag) { + flow_dv_tag_release(dev, act_res->rix_tag); + act_res->rix_tag = 0; + } + if (act_res->cnt) { + flow_dv_counter_free(dev, act_res->cnt); + act_res->cnt = 0; + } +} + +int +flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_sample_resource *resource = ctx->data; + struct mlx5_flow_dv_sample_resource *cache_resource = + container_of(entry, typeof(*cache_resource), entry); + + if (resource->ratio == cache_resource->ratio && + resource->ft_type == cache_resource->ft_type && + resource->ft_id == cache_resource->ft_id && + resource->set_action == cache_resource->set_action && + !memcmp((void *)&resource->sample_act, + (void *)&cache_resource->sample_act, + sizeof(struct mlx5_flow_sub_actions_list))) { + /* + * Existing sample action should release the prepared + * sub-actions reference counter. + */ + flow_dv_sample_sub_actions_release(dev, + &resource->sample_idx); + return 0; + } + return 1; +} + +struct mlx5_cache_entry * +flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_sample_resource *resource = ctx->data; + void **sample_dv_actions = resource->sub_actions; struct mlx5_flow_dv_sample_resource *cache_resource; struct mlx5dv_dr_flow_sampler_attr sampler_attr; struct mlx5_priv *priv = dev->data->dev_private; @@ -8637,41 +8660,28 @@ flow_dv_sample_resource_register(struct rte_eth_dev *dev, uint32_t idx = 0; const uint32_t next_ft_step = 1; uint32_t next_ft_id = resource->ft_id + next_ft_step; + uint8_t is_egress = 0; + uint8_t is_transfer = 0; + struct rte_flow_error *error = ctx->error; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list, - idx, cache_resource, next) { - if (resource->ratio == cache_resource->ratio && - resource->ft_type == cache_resource->ft_type && - resource->ft_id == cache_resource->ft_id && - resource->set_action == cache_resource->set_action && - !memcmp((void *)&resource->sample_act, - (void *)&cache_resource->sample_act, - sizeof(struct mlx5_flow_sub_actions_list))) { - DRV_LOG(DEBUG, "sample resource %p: refcnt %d++", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, - __ATOMIC_RELAXED)); - __atomic_fetch_add(&cache_resource->refcnt, 1, - __ATOMIC_RELAXED); - dev_flow->handle->dvh.rix_sample = idx; - dev_flow->dv.sample_res = cache_resource; - return 0; - } - } /* Register new sample resource. 
*/ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], - &dev_flow->handle->dvh.rix_sample); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, + cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx); + if (!cache_resource) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); + return NULL; + } *cache_resource = *resource; /* Create normal path table level */ + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + is_transfer = 1; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) + is_egress = 1; tbl = flow_dv_tbl_resource_get(dev, next_ft_id, - attr->egress, attr->transfer, - dev_flow->external, NULL, 0, error); + is_egress, is_transfer, + true, NULL, 0, 0, error); if (!tbl) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -8710,53 +8720,31 @@ flow_dv_sample_resource_register(struct rte_eth_dev *dev, NULL, "cannot create sample action"); goto error; } - __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list, - dev_flow->handle->dvh.rix_sample, cache_resource, - next); - dev_flow->dv.sample_res = cache_resource; - DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); - return 0; + cache_resource->idx = idx; + return &cache_resource->entry; error: - if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { - if (cache_resource->default_miss) - claim_zero(mlx5_glue->destroy_flow_action + if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB && + cache_resource->default_miss) + claim_zero(mlx5_glue->destroy_flow_action (cache_resource->default_miss)); - } else { - if (cache_resource->sample_idx.rix_hrxq && - !mlx5_hrxq_release(dev, - cache_resource->sample_idx.rix_hrxq)) - cache_resource->sample_idx.rix_hrxq = 0; - if (cache_resource->sample_idx.rix_tag && - !flow_dv_tag_release(dev, - cache_resource->sample_idx.rix_tag)) - cache_resource->sample_idx.rix_tag = 0; - if (cache_resource->sample_idx.cnt) { - flow_dv_counter_release(dev, - cache_resource->sample_idx.cnt); - cache_resource->sample_idx.cnt = 0; - } - } + else + flow_dv_sample_sub_actions_release(dev, + &cache_resource->sample_idx); if (cache_resource->normal_path_tbl) - flow_dv_tbl_resource_release(dev, + flow_dv_tbl_resource_release(MLX5_SH(dev), cache_resource->normal_path_tbl); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], - dev_flow->handle->dvh.rix_sample); - dev_flow->handle->dvh.rix_sample = 0; - return -rte_errno; + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx); + return NULL; + } /** - * Find existing destination array resource or create and register a new one. + * Find existing sample resource or create and register a new one. * * @param[in, out] dev * Pointer to rte_eth_dev structure. - * @param[in] attr - * Attributes of flow that includes this item. * @param[in] resource - * Pointer to destination array resource. + * Pointer to sample resource. * @parm[in, out] dev_flow * Pointer to the dev_flow. * @param[out] error @@ -8766,54 +8754,90 @@ error: * 0 on success otherwise -errno and errno is set. 
 */
 static int
-flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
-			 const struct rte_flow_attr *attr,
-			 struct mlx5_flow_dv_dest_array_resource *resource,
+flow_dv_sample_resource_register(struct rte_eth_dev *dev,
+			 struct mlx5_flow_dv_sample_resource *resource,
 			 struct mlx5_flow *dev_flow,
 			 struct rte_flow_error *error)
 {
+	struct mlx5_flow_dv_sample_resource *cache_resource;
+	struct mlx5_cache_entry *entry;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_cb_ctx ctx = {
+		.dev = dev,
+		.error = error,
+		.data = resource,
+	};
+
+	entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
+	if (!entry)
+		return -rte_errno;
+	cache_resource = container_of(entry, typeof(*cache_resource), entry);
+	dev_flow->handle->dvh.rix_sample = cache_resource->idx;
+	dev_flow->dv.sample_res = cache_resource;
+	return 0;
+}
+
+int
+flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
+			    struct mlx5_cache_entry *entry, void *cb_ctx)
+{
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+	struct rte_eth_dev *dev = ctx->dev;
+	struct mlx5_flow_dv_dest_array_resource *cache_resource =
+			container_of(entry, typeof(*cache_resource), entry);
+	uint32_t idx = 0;
+
+	if (resource->num_of_dest == cache_resource->num_of_dest &&
+	    resource->ft_type == cache_resource->ft_type &&
+	    !memcmp((void *)cache_resource->sample_act,
+		    (void *)resource->sample_act,
+		   (resource->num_of_dest *
+		   sizeof(struct mlx5_flow_sub_actions_list)))) {
+		/*
+		 * Existing sample action should release the prepared
+		 * sub-actions reference counter.
+		 */
+		for (idx = 0; idx < resource->num_of_dest; idx++)
+			flow_dv_sample_sub_actions_release(dev,
+					&resource->sample_idx[idx]);
+		return 0;
+	}
+	return 1;
+}
+
+struct mlx5_cache_entry *
+flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
+			 struct mlx5_cache_entry *entry __rte_unused,
+			 void *cb_ctx)
+{
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct rte_eth_dev *dev = ctx->dev;
 	struct mlx5_flow_dv_dest_array_resource *cache_resource;
+	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
 	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
 	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_sub_actions_list *sample_act;
 	struct mlx5dv_dr_domain *domain;
-	uint32_t idx = 0;
+	uint32_t idx = 0, res_idx = 0;
+	struct rte_flow_error *error = ctx->error;
 
-	/* Lookup a matching resource from cache. */
-	ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
-		      sh->dest_array_list,
-		      idx, cache_resource, next) {
-		if (resource->num_of_dest == cache_resource->num_of_dest &&
-		    resource->ft_type == cache_resource->ft_type &&
-		    !memcmp((void *)cache_resource->sample_act,
-			    (void *)resource->sample_act,
-			    (resource->num_of_dest *
-			    sizeof(struct mlx5_flow_sub_actions_list)))) {
-			DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
-				(void *)cache_resource,
-				__atomic_load_n(&cache_resource->refcnt,
-						__ATOMIC_RELAXED));
-			__atomic_fetch_add(&cache_resource->refcnt, 1,
-					   __ATOMIC_RELAXED);
-			dev_flow->handle->dvh.rix_dest_array = idx;
-			dev_flow->dv.dest_array_res = cache_resource;
-			return 0;
-		}
-	}
 	/* Register new destination array resource. */
 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
-					    &dev_flow->handle->dvh.rix_dest_array);
-	if (!cache_resource)
-		return rte_flow_error_set(error, ENOMEM,
+					    &res_idx);
+	if (!cache_resource) {
+		rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
 					  "cannot allocate resource memory");
+		return NULL;
+	}
 	*cache_resource = *resource;
-	if (attr->transfer)
+	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
-	else if (attr->ingress)
+	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
 		domain = sh->rx_domain;
 	else
 		domain = sh->tx_domain;
@@ -8858,42 +8882,71 @@ flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
 			   "cannot create destination array action");
 		goto error;
 	}
-	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
-	ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
-		     &sh->dest_array_list,
-		     dev_flow->handle->dvh.rix_dest_array, cache_resource,
-		     next);
+	cache_resource->idx = res_idx;
+	for (idx = 0; idx < resource->num_of_dest; idx++)
+		mlx5_free(dest_attr[idx]);
+	return &cache_resource->entry;
+error:
+	for (idx = 0; idx < resource->num_of_dest; idx++) {
+		struct mlx5_flow_sub_actions_idx *act_res =
+					&cache_resource->sample_idx[idx];
+		if (act_res->rix_hrxq &&
+		    !mlx5_hrxq_release(dev,
				act_res->rix_hrxq))
+			act_res->rix_hrxq = 0;
+		if (act_res->rix_encap_decap &&
+		    !flow_dv_encap_decap_resource_release(dev,
				act_res->rix_encap_decap))
+			act_res->rix_encap_decap = 0;
+		if (act_res->rix_port_id_action &&
+		    !flow_dv_port_id_action_resource_release(dev,
				act_res->rix_port_id_action))
+			act_res->rix_port_id_action = 0;
+		if (dest_attr[idx])
+			mlx5_free(dest_attr[idx]);
+	}
+
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
+	return NULL;
+}
+
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] resource
+ *   Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno and errno is set.
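+ *
+ * Note: on a cache hit, flow_dv_dest_array_match_cb() has already released
+ * the sub-action references prepared in @p resource, so the caller must not
+ * release them a second time. The registration call mirrors the sample
+ * resource path above:
+ *
+ * @code
+ *	struct mlx5_flow_cb_ctx ctx = {
+ *		.dev = dev, .error = error, .data = resource,
+ *	};
+ *	entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
+ * @endcode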
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+			 struct mlx5_flow_dv_dest_array_resource *resource,
+			 struct mlx5_flow *dev_flow,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_flow_dv_dest_array_resource *cache_resource;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_cache_entry *entry;
+	struct mlx5_flow_cb_ctx ctx = {
+		.dev = dev,
+		.error = error,
+		.data = resource,
+	};
+
+	entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
+	if (!entry)
+		return -rte_errno;
+	cache_resource = container_of(entry, typeof(*cache_resource), entry);
+	dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
 	dev_flow->dv.dest_array_res = cache_resource;
-	DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
-		(void *)cache_resource,
-		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
-	for (idx = 0; idx < resource->num_of_dest; idx++)
-		mlx5_free(dest_attr[idx]);
 	return 0;
-error:
-	for (idx = 0; idx < resource->num_of_dest; idx++) {
-		struct mlx5_flow_sub_actions_idx *act_res =
-					&cache_resource->sample_idx[idx];
-		if (act_res->rix_hrxq &&
-		    !mlx5_hrxq_release(dev,
				act_res->rix_hrxq))
-			act_res->rix_hrxq = 0;
-		if (act_res->rix_encap_decap &&
-		    !flow_dv_encap_decap_resource_release(dev,
				act_res->rix_encap_decap))
-			act_res->rix_encap_decap = 0;
-		if (act_res->rix_port_id_action &&
-		    !flow_dv_port_id_action_resource_release(dev,
				act_res->rix_port_id_action))
-			act_res->rix_port_id_action = 0;
-		if (dest_attr[idx])
-			mlx5_free(dest_attr[idx]);
-	}
-
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
-			dev_flow->handle->dvh.rix_dest_array);
-	dev_flow->handle->dvh.rix_dest_array = 0;
-	return -rte_errno;
 }
 
 /**
@@ -8935,11 +8988,12 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 	const struct rte_flow_action_queue *queue;
 	struct mlx5_flow_sub_actions_list *sample_act;
 	struct mlx5_flow_sub_actions_idx *sample_idx;
-	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
-					      priv->rss_desc)
-					      [!!priv->flow_nested_idx];
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct mlx5_flow_rss_desc *rss_desc;
 	uint64_t action_flags = 0;
 
+	MLX5_ASSERT(wks);
+	rss_desc = &wks->rss_desc;
 	sample_act = &res->sample_act;
 	sample_idx = &res->sample_idx;
 	sample_action = (const struct rte_flow_action_sample *)action->conf;
@@ -8958,8 +9012,8 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 			queue = sub_actions->conf;
 			rss_desc->queue_num = 1;
 			rss_desc->queue[0] = queue->index;
-			hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
-					rss_desc, &hrxq_idx);
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+						    rss_desc, &hrxq_idx);
 			if (!hrxq)
 				return rte_flow_error_set
					(error, rte_errno,
@@ -9101,6 +9155,8 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 		res->set_action = action_ctx.set_action;
 	} else if (attr->ingress) {
 		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+	} else {
+		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
 	}
 	return 0;
 }
@@ -9112,8 +9168,6 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
  *   Pointer to rte_eth_dev structure.
  * @param[in, out] dev_flow
  *   Pointer to the mlx5_flow.
- * @param[in] attr
- *   Pointer to the flow attributes.
  * @param[in] num_of_dest
 *   The number of destinations.
 * @param[in, out] res
@@ -9133,7 +9187,6 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 static int
 flow_dv_create_action_sample(struct rte_eth_dev *dev,
 			     struct mlx5_flow *dev_flow,
-			     const struct rte_flow_attr *attr,
 			     uint32_t num_of_dest,
 			     struct mlx5_flow_dv_sample_resource *res,
 			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
@@ -9141,23 +9194,23 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
 			     uint64_t action_flags,
 			     struct rte_flow_error *error)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	/* update normal path action resource into last index of array */
 	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
 	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
-	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
-					      priv->rss_desc)
-					      [!!priv->flow_nested_idx];
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct mlx5_flow_rss_desc *rss_desc;
 	uint32_t normal_idx = 0;
 	struct mlx5_hrxq *hrxq;
 	uint32_t hrxq_idx;
 
+	MLX5_ASSERT(wks);
+	rss_desc = &wks->rss_desc;
 	if (num_of_dest > 1) {
 		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
 			/* Handle QP action for mirroring */
-			hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
-						       rss_desc, &hrxq_idx);
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
 			if (!hrxq)
 				return rte_flow_error_set
				     (error, rte_errno,
@@ -9193,15 +9246,15 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
 		memcpy(&mdest_res->sample_act[0], &res->sample_act,
 		       sizeof(struct mlx5_flow_sub_actions_list));
 		mdest_res->num_of_dest = num_of_dest;
-		if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
+		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
 			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
 	} else {
-		if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
-						     sample_actions, error))
+		res->sub_actions = sample_actions;
+		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
 			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
@@ -9210,6 +9263,248 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Remove an ASO age action from the age actions list.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] age
+ *   Pointer to the ASO age action handler.
+ */
+static void
+flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
+				struct mlx5_aso_age_action *age)
+{
+	struct mlx5_age_info *age_info;
+	struct mlx5_age_param *age_param = &age->age_params;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint16_t expected = AGE_CANDIDATE;
+
+	age_info = GET_PORT_AGE_INFO(priv);
+	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+					 AGE_FREE, false, __ATOMIC_RELAXED,
+					 __ATOMIC_RELAXED)) {
+		/*
+		 * We need the lock even if it is an age timeout,
+		 * since the age action may still be in process.
+		 */
+		rte_spinlock_lock(&age_info->aged_sl);
+		LIST_REMOVE(age, next);
+		rte_spinlock_unlock(&age_info->aged_sl);
+		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+	}
+}
+
+/**
+ * Release an ASO age action.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ *   Index of ASO age action to release.
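+ *
+ * The action is reference counted: only the call that drops the counter to
+ * zero unlinks it from the aged-out list and puts it back on the free list;
+ * any other call just decrements and reports the remaining references, e.g.:
+ *
+ * @code
+ *	int remaining = flow_dv_aso_age_release(dev, age_idx);
+ *	if (remaining)
+ *		DRV_LOG(DEBUG, "age action still referenced: %d", remaining);
+ * @endcode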
+ *
+ * @return
+ *   0 when age action was removed, otherwise the number of references.
+ */
+static int
+flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
+	uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
+
+	if (!ret) {
+		flow_dv_aso_age_remove_from_age(dev, age);
+		rte_spinlock_lock(&mng->free_sl);
+		LIST_INSERT_HEAD(&mng->free, age, next);
+		rte_spinlock_unlock(&mng->free_sl);
+	}
+	return ret;
+}
+
+/**
+ * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	void *old_pools = mng->pools;
+	uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
+	uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
+	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+	if (!pools) {
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	if (old_pools) {
+		memcpy(pools, old_pools,
+		       mng->n * sizeof(struct mlx5_aso_age_pool *));
+		mlx5_free(old_pools);
+	} else {
+		/* First ASO flow hit allocation - starting ASO data-path. */
+		int ret = mlx5_aso_queue_start(priv->sh);
+
+		if (ret) {
+			mlx5_free(pools);
+			return ret;
+		}
+	}
+	mng->n = resize;
+	mng->pools = pools;
+	return 0;
+}
+
+/**
+ * Create and initialize a new ASO aging pool.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] age_free
+ *   Where to put the pointer of a new age action.
+ *
+ * @return
+ *   The age actions pool pointer and @p age_free is set on success,
+ *   NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_age_pool *
+flow_dv_age_pool_create(struct rte_eth_dev *dev,
+			struct mlx5_aso_age_action **age_free)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	struct mlx5_aso_age_pool *pool = NULL;
+	struct mlx5_devx_obj *obj = NULL;
+	uint32_t i;
+
+	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+						    priv->sh->pdn);
+	if (!obj) {
+		rte_errno = ENODATA;
+		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
+		return NULL;
+	}
+	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+	if (!pool) {
+		claim_zero(mlx5_devx_cmd_destroy(obj));
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	pool->flow_hit_aso_obj = obj;
+	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
+	rte_spinlock_lock(&mng->resize_sl);
+	pool->index = mng->next;
+	/* Resize pools array if there is no room for the new pool in it. */
+	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
+		claim_zero(mlx5_devx_cmd_destroy(obj));
+		mlx5_free(pool);
+		rte_spinlock_unlock(&mng->resize_sl);
+		return NULL;
+	}
+	mng->pools[pool->index] = pool;
+	mng->next++;
+	rte_spinlock_unlock(&mng->resize_sl);
+	/* Assign the first action in the new pool, the rest go to free list. */
+	*age_free = &pool->actions[0];
+	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
+		pool->actions[i].offset = i;
+		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
+	}
+	return pool;
+}
+
+/**
+ * Allocate an ASO aging bit.
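+ *
+ * The returned index packs the pool index into the lower 16 bits and the
+ * in-pool offset plus one into the upper bits, so that 0 stays free as an
+ * error value. A sketch of the inverse mapping (variable names are
+ * illustrative only):
+ *
+ * @code
+ *	uint32_t pool_idx = age_idx & UINT16_MAX;
+ *	uint32_t offset = (age_idx >> 16) - 1;
+ * @endcode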
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct mlx5_aso_age_pool *pool;
+	struct mlx5_aso_age_action *age_free = NULL;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+
+	MLX5_ASSERT(mng);
+	/* Try to get the next free age action bit. */
+	rte_spinlock_lock(&mng->free_sl);
+	age_free = LIST_FIRST(&mng->free);
+	if (age_free) {
+		LIST_REMOVE(age_free, next);
+	} else if (!flow_dv_age_pool_create(dev, &age_free)) {
+		rte_spinlock_unlock(&mng->free_sl);
+		return 0; /* 0 is an error. */
+	}
+	rte_spinlock_unlock(&mng->free_sl);
+	pool = container_of
+	  ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
		  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
		  actions);
+	if (!age_free->dr_action) {
+		age_free->dr_action = mlx5_glue->dr_action_create_flow_hit
						(pool->flow_hit_aso_obj->obj,
						 age_free->offset, REG_C_5);
+		if (!age_free->dr_action) {
+			rte_errno = errno;
+			rte_spinlock_lock(&mng->free_sl);
+			LIST_INSERT_HEAD(&mng->free, age_free, next);
+			rte_spinlock_unlock(&mng->free_sl);
+			return 0; /* 0 is an error. */
+		}
+	}
+	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+	return pool->index | ((age_free->offset + 1) << 16);
+}
+
+/**
+ * Create an age action using the ASO mechanism.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] age
+ *   Pointer to the aging action configuration.
+ *
+ * @return
+ *   Index to ASO age action on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
+				 const struct rte_flow_action_age *age)
+{
+	uint32_t age_idx = 0;
+	struct mlx5_aso_age_action *aso_age;
+
+	age_idx = flow_dv_aso_age_alloc(dev);
+	if (!age_idx)
+		return 0;
+	aso_age = flow_aso_age_get_by_idx(dev, age_idx);
+	aso_age->age_params.context = age->context;
+	aso_age->age_params.timeout = age->timeout;
+	aso_age->age_params.port_id = dev->data->port_id;
+	__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
+			 __ATOMIC_RELAXED);
+	__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
+			 __ATOMIC_RELAXED);
+	return age_idx;
+}
+
 /**
  * Fill the flow with DV spec, lock free
  * (mutex should be acquired by caller).
  *
@@ -9231,20 +9526,19 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
 *   0 on success, a negative errno value otherwise and rte_errno is set.
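 *
 * The RSS description is now taken from the per-thread flow workspace
 * instead of the port-private area, which is what makes this path safe for
 * concurrent flow insertion; the lookup used throughout below is:
 *
 * @code
 *	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 *	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 * @endcode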
 */
 static int
-__flow_dv_translate(struct rte_eth_dev *dev,
-		    struct mlx5_flow *dev_flow,
-		    const struct rte_flow_attr *attr,
-		    const struct rte_flow_item items[],
-		    const struct rte_flow_action actions[],
-		    struct rte_flow_error *error)
+flow_dv_translate(struct rte_eth_dev *dev,
+		  struct mlx5_flow *dev_flow,
+		  const struct rte_flow_attr *attr,
+		  const struct rte_flow_item items[],
+		  const struct rte_flow_action actions[],
+		  struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *dev_conf = &priv->config;
 	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5_flow_handle *handle = dev_flow->handle;
-	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
-					      priv->rss_desc)
-					      [!!priv->flow_nested_idx];
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct mlx5_flow_rss_desc *rss_desc;
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
@@ -9288,8 +9582,15 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 		.external = !!dev_flow->external,
 		.transfer = !!attr->transfer,
 		.fdb_def_rule = !!priv->fdb_def_rule,
+		.skip_scale = !!dev_flow->skip_scale,
 	};
 
+	if (!wks)
+		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to push flow workspace");
+	rss_desc = &wks->rss_desc;
 	memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
 	memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
@@ -9306,7 +9607,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
				(dev, tunnel, attr, items, actions);
 	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	dev_flow->dv.group = table;
@@ -9320,10 +9621,14 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 		/*
 		 * do not add decap action if match rule drops packet
 		 * HW rejects rules with decap & drop
+		 *
+		 * If a tunnel match rule was inserted before the matching
+		 * tunnel set rule, the flow table used in the match rule must
+		 * be registered. The current implementation handles that in
+		 * flow_dv_match_register() at the end of the function.
 		 */
 		bool add_decap = true;
 		const struct rte_flow_action *ptr = actions;
-		struct mlx5_flow_tbl_resource *tbl;
 
 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9340,20 +9645,6 @@ __flow_dv_translate(struct rte_eth_dev *dev,
				dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 		}
-		/*
-		 * bind table_id with <group> for tunnel match rule.
-		 * Tunnel set rule establishes that bind in JUMP action handler.
-		 * Required for scenario when application creates tunnel match
-		 * rule before tunnel set rule.
-		 */
-		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer,
-					       !!dev_flow->external, tunnel,
-					       attr->group, error);
-		if (!tbl)
-			return rte_flow_error_set
-			       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-			       actions, "cannot register tunnel group");
 	}
 	for (; !actions_end ; actions++) {
 		const struct rte_flow_action_queue *queue;
@@ -9362,6 +9653,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 		const uint8_t *rss_key;
 		const struct rte_flow_action_meter *mtr;
 		struct mlx5_flow_tbl_resource *tbl;
+		struct mlx5_aso_age_action *age_act;
 		uint32_t port_id = 0;
 		struct mlx5_flow_dv_port_id_action_resource port_id_resource;
 		int action_type = actions->type;
@@ -9496,9 +9788,35 @@ __flow_dv_translate(struct rte_eth_dev *dev,
			 * when expanding items for RSS.
			 */
 			action_flags |= MLX5_FLOW_ACTION_RSS;
-			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+			dev_flow->handle->fate_action = rss_desc->shared_rss ?
+				MLX5_FLOW_FATE_SHARED_RSS :
+				MLX5_FLOW_FATE_QUEUE;
+			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
+			flow->age = (uint32_t)(uintptr_t)(action->conf);
+			age_act = flow_aso_age_get_by_idx(dev, flow->age);
+			__atomic_fetch_add(&age_act->refcnt, 1,
					   __ATOMIC_RELAXED);
+			dev_flow->dv.actions[actions_n++] = age_act->dr_action;
+			action_flags |= MLX5_FLOW_ACTION_AGE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_AGE:
+			if (priv->sh->flow_hit_aso_en && attr->group) {
+				flow->age = flow_dv_translate_create_aso_age
						(dev, action->conf);
+				if (!flow->age)
+					return rte_flow_error_set
						(error, rte_errno,
						 RTE_FLOW_ERROR_TYPE_ACTION,
						 NULL,
						 "can't create ASO age action");
+				dev_flow->dv.actions[actions_n++] =
					  (flow_aso_age_get_by_idx
						(dev, flow->age))->dr_action;
+				action_flags |= MLX5_FLOW_ACTION_AGE;
+				break;
+			}
+			/* Fall-through */
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 			if (!dev_conf->devx) {
 				return rte_flow_error_set
@@ -9622,16 +9940,17 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			jump_group = ((const struct rte_flow_action_jump *)
							action->conf)->group;
 			grp_info.std_tbl_fix = 0;
+			grp_info.skip_scale = 0;
 			ret = mlx5_flow_group_to_table(dev, tunnel,
						       jump_group,
						       &table,
-						       grp_info, error);
+						       &grp_info, error);
 			if (ret)
 				return ret;
 			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
						       attr->transfer,
						       !!dev_flow->external,
-						       tunnel, jump_group,
+						       tunnel, jump_group, 0,
						       error);
 			if (!tbl)
 				return rte_flow_error_set
@@ -9641,7 +9960,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
						 "cannot create jump action.");
 			if (flow_dv_jump_tbl_resource_register
			    (dev, tbl, dev_flow, error)) {
-				flow_dv_tbl_resource_release(dev, tbl);
+				flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
 				return rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9839,7 +10158,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 		}
 		if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
 			ret = flow_dv_create_action_sample(dev,
-							   dev_flow, attr,
+							   dev_flow,
							   num_of_dest,
							   &sample_res,
							   &mdest_res,
@@ -10155,7 +10474,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
 	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
 		return -rte_errno;
 	return 0;
 }
@@ -10215,8 +10535,10 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
 * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
 * and tunnel.
 *
- * @param[in] action
- *   Shred RSS action holding hash RX queue objects.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   Shared RSS action ID holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
 * @param[in] tunnel
@@ -10226,11 +10548,15 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
 *   Valid hash RX queue index, otherwise 0.
 */
 static uint32_t
-__flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
+__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
				 const uint64_t hash_fields,
				 const int tunnel)
 {
-	const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss =
+	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+	const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
+						shared_rss->hrxq_tunnel;
 
 	switch (hash_fields & ~IBV_RX_HASH_INNER) {
 	case MLX5_RSS_HASH_IPV4:
@@ -10257,10 +10583,12 @@ __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
 * If shared action configured for *flow* suitable hash RX queue will be
 * retrieved from attached shared action.
 *
- * @param[in] flow
- *   Shred RSS action holding hash RX queue objects.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
+ * @param[in] rss_desc
+ *   Pointer to the RSS descriptor.
 * @param[out] hrxq
 *   Pointer to retrieved hash RX queue object.
 *
@@ -10268,46 +10596,25 @@ __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
 *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
 */
 static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
-		       struct mlx5_flow *dev_flow,
-		       struct mlx5_hrxq **hrxq)
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+		       struct mlx5_flow_rss_desc *rss_desc,
+		       struct mlx5_hrxq **hrxq)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint32_t hrxq_idx;
 
-	if (flow->shared_rss) {
+	if (rss_desc->shared_rss) {
 		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-				(flow->shared_rss, dev_flow->hash_fields,
+				(dev, rss_desc->shared_rss,
+				 dev_flow->hash_fields,
				 !!(dev_flow->handle->layers &
				    MLX5_FLOW_LAYER_TUNNEL));
-		if (hrxq_idx) {
+		if (hrxq_idx)
 			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					       hrxq_idx);
-			rte_atomic32_inc(&(*hrxq)->refcnt);
-		}
 	} else {
-		struct mlx5_flow_rss_desc *rss_desc =
-			&((struct mlx5_flow_rss_desc *)priv->rss_desc)
-			[!!priv->flow_nested_idx];
-
-		MLX5_ASSERT(rss_desc->queue_num);
-		hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
-					 rss_desc->queue, rss_desc->queue_num);
-		if (!hrxq_idx) {
-			hrxq_idx = mlx5_hrxq_new(dev,
-						 rss_desc->key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 dev_flow->hash_fields,
-						 rss_desc->queue,
-						 rss_desc->queue_num,
-						 !!(dev_flow->handle->layers &
-						 MLX5_FLOW_LAYER_TUNNEL),
-						 false);
-		}
-		*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-				       hrxq_idx);
+		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+					     &hrxq_idx);
 	}
 	return hrxq_idx;
 }
@@ -10327,8 +10634,8 @@ __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
 *   0 on success, a negative errno value otherwise and rte_errno is set.
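 *
 * Two fate changes are visible in this function: a drop rule now reuses the
 * preallocated priv->drop_queue.hrxq instead of creating a drop queue per
 * flow, and a flow bound to a shared RSS action resolves its hash queue from
 * the action's pre-created set, for example:
 *
 * @code
 *	hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
 *			rss_desc->shared_rss, dev_flow->hash_fields, tunnel);
 * @endcode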
 */
 static int
-__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
-	      struct rte_flow_error *error)
+flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+	      struct rte_flow_error *error)
 {
 	struct mlx5_flow_dv_workspace *dv;
 	struct mlx5_flow_handle *dh;
@@ -10339,9 +10646,17 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	int n;
 	int err;
 	int idx;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
-	for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
-		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
+	MLX5_ASSERT(wks);
+	if (rss_desc->shared_rss) {
+		dh = wks->flows[wks->flow_idx - 1].handle;
+		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
+		dh->rix_srss = rss_desc->shared_rss;
+	}
+	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
+		dev_flow = &wks->flows[idx];
 		dv = &dev_flow->dv;
 		dh = dev_flow->handle;
 		dv_h = &dh->dvh;
@@ -10350,31 +10665,16 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
			if (dv->transfer) {
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
-				struct mlx5_hrxq *drop_hrxq;
-				drop_hrxq = mlx5_drop_action_create(dev);
-				if (!drop_hrxq) {
-					rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						 NULL,
						 "cannot get drop hash queue");
-					goto error;
-				}
-				/*
-				 * Drop queues will be released by the specify
-				 * mlx5_drop_action_destroy() function. Assign
-				 * the special index to hrxq to mark the queue
-				 * has been allocated.
-				 */
-				dh->rix_hrxq = UINT32_MAX;
-				dv->actions[n++] = drop_hrxq->action;
+				MLX5_ASSERT(priv->drop_queue.hrxq);
+				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
			}
-		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-			   !dv_h->rix_sample && !dv_h->rix_dest_array) {
+		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array) ||
+			   (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-						(dev, flow, dev_flow, &hrxq);
-
+					(dev, dev_flow, rss_desc, &hrxq);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
@@ -10382,19 +10682,18 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
					 "cannot get hash queue");
				goto error;
			}
-			dh->rix_hrxq = hrxq_idx;
+			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+				dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
-			if (flow_dv_default_miss_resource_register
-					(dev, error)) {
+			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					 NULL,
-					 "cannot create default miss resource");
-				goto error_default_miss;
+					 NULL,
+					 "default miss action was not created.");
+				goto error;
			}
-			dh->rix_default_fate =  MLX5_FLOW_FATE_DEFAULT_MISS;
-			dv->actions[n++] = priv->sh->default_miss.action;
+			dv->actions[n++] = priv->sh->default_miss_action;
		}
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
@@ -10419,29 +10718,34 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	}
 	return 0;
 error:
-	if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
-		flow_dv_default_miss_resource_release(dev);
-error_default_miss:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
 		/* hrxq is union, don't clear it if the flag is not set. */
-		if (dh->rix_hrxq) {
-			if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
-				mlx5_drop_action_destroy(dev);
-				dh->rix_hrxq = 0;
-			} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
-				mlx5_hrxq_release(dev, dh->rix_hrxq);
-				dh->rix_hrxq = 0;
-			}
+		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
+			mlx5_hrxq_release(dev, dh->rix_hrxq);
+			dh->rix_hrxq = 0;
 		}
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
+	if (rss_desc->shared_rss)
+		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
 
+void
+flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
+			  struct mlx5_cache_entry *entry)
+{
+	struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
							  entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
+	mlx5_free(cache);
+}
+
 /**
 * Release the flow matcher.
 *
@@ -10458,23 +10762,34 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
+	struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
+							    typeof(*tbl), tbl);
+	int ret;
 
 	MLX5_ASSERT(matcher->matcher_object);
-	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
-		dev->data->port_id, (void *)matcher,
-		rte_atomic32_read(&matcher->refcnt));
-	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
-		claim_zero(mlx5_flow_os_destroy_flow_matcher
-			   (matcher->matcher_object));
-		LIST_REMOVE(matcher, next);
-		/* table ref-- in release interface. */
-		flow_dv_tbl_resource_release(dev, matcher->tbl);
-		mlx5_free(matcher);
-		DRV_LOG(DEBUG, "port %u matcher %p: removed",
-			dev->data->port_id, (void *)matcher);
-		return 0;
-	}
-	return 1;
+	ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
+	flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
+	return ret;
+}
+
+/**
+ * Release encap_decap resource.
+ *
+ * @param list
+ *   Pointer to the hash list.
+ * @param entry
+ *   Pointer to the existing resource entry object.
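+ *
+ * This is expected to be invoked by mlx5_hlist_unregister() only when the
+ * entry reference count drops to zero, so destroying the flow action and
+ * freeing the ipool entry happens in exactly one place; release sites just
+ * call:
+ *
+ * @code
+ *	mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
+ * @endcode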
+ */
+void
+flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
+			      struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_dv_encap_decap_resource *res =
+		container_of(entry, typeof(*res), entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
 }
 
 /**
@@ -10493,28 +10808,15 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t idx = encap_decap_idx;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 
 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
-					idx);
+					encap_decap_idx);
 	if (!cache_resource)
 		return 0;
 	MLX5_ASSERT(cache_resource->action);
-	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
-		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
-		claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->action));
-		mlx5_hlist_remove(priv->sh->encaps_decaps,
-				  &cache_resource->entry);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
-		DRV_LOG(DEBUG, "encap/decap resource %p: removed",
-			(void *)cache_resource);
-		return 0;
-	}
-	return 1;
+	return mlx5_hlist_unregister(priv->sh->encaps_decaps,
				     &cache_resource->entry);
 }
 
 /**
@@ -10533,58 +10835,24 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
 	struct mlx5_flow_tbl_data_entry *tbl_data;
 
 	tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
				  handle->rix_jump);
 	if (!tbl_data)
 		return 0;
-	cache_resource = &tbl_data->jump;
-	MLX5_ASSERT(cache_resource->action);
-	DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
-		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
-		claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->action));
-		/* jump action memory free is inside the table release. */
-		flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
-		DRV_LOG(DEBUG, "jump table resource %p: removed",
-			(void *)cache_resource);
-		return 0;
-	}
-	return 1;
+	return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
 }
 
-/**
- * Release a default miss resource.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @return
- *   1 while a reference on it exists, 0 when freed.
- */
-static int
-flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
+void
+flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
+			 struct mlx5_hlist_entry *entry)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_flow_default_miss_resource *cache_resource =
-			&sh->default_miss;
+	struct mlx5_flow_dv_modify_hdr_resource *res =
		container_of(entry, typeof(*res), entry);
 
-	MLX5_ASSERT(cache_resource->action);
-	DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
-		(void *)cache_resource->action,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
-		claim_zero(mlx5_glue->destroy_flow_action
-				(cache_resource->action));
-		DRV_LOG(DEBUG, "default miss resource %p: removed",
-			(void *)cache_resource->action);
-		return 0;
-	}
-	return 1;
+	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
+	mlx5_free(entry);
 }
 
 /**
@@ -10603,24 +10871,22 @@ flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
				    struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-							handle->dvh.modify_hdr;
+	struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
 
-	MLX5_ASSERT(cache_resource->action);
-	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
-		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
-		claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->action));
-		mlx5_hlist_remove(priv->sh->modify_cmds,
-				  &cache_resource->entry);
-		mlx5_free(cache_resource);
-		DRV_LOG(DEBUG, "modify-header resource %p: removed",
-			(void *)cache_resource);
-		return 0;
-	}
-	return 1;
+	MLX5_ASSERT(entry->action);
+	return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
+}
+
+void
+flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
+			  struct mlx5_cache_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_dv_port_id_action_resource *cache =
			container_of(entry, typeof(*cache), entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
 }
 
 /**
@@ -10639,29 +10905,45 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_port_id_action_resource *cache_resource;
-	uint32_t idx = port_id;
+	struct mlx5_flow_dv_port_id_action_resource *cache;
 
-	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
-					idx);
-	if (!cache_resource)
+	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
+	if (!cache)
 		return 0;
-	MLX5_ASSERT(cache_resource->action);
-	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
-		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
-		claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->action));
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
-			     &priv->sh->port_id_action_list, idx,
-			     cache_resource, next);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
-		DRV_LOG(DEBUG, "port id action resource %p: removed",
-			(void *)cache_resource);
-		return 0;
-	}
-	return 1;
+	MLX5_ASSERT(cache->action);
+	return mlx5_cache_unregister(&priv->sh->port_id_action_list,
				     &cache->entry);
+}
+
+/**
+ * Release shared RSS action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param srss
+ *   Shared RSS action index.
+ */
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss;
+
+	shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
+	__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+}
+
+void
+flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
+			    struct mlx5_cache_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_dv_push_vlan_action_resource *cache =
			container_of(entry, typeof(*cache), entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
 }
 
 /**
@@ -10680,29 +10962,15 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
					  struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_push_vlan_action_resource *cache;
 	uint32_t idx = handle->dvh.rix_push_vlan;
-	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
 
-	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
-					idx);
-	if (!cache_resource)
-		return 0;
-	MLX5_ASSERT(cache_resource->action);
-	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
-		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
-		claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->action));
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
-			     &priv->sh->push_vlan_action_list, idx,
-			     cache_resource, next);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
-		DRV_LOG(DEBUG, "push vlan action resource %p: removed",
-			(void *)cache_resource);
+	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
+	if (!cache)
 		return 0;
-	}
-	return 1;
+	MLX5_ASSERT(cache->action);
+	return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
				     &cache->entry);
 }
 
 /**
@@ -10720,9 +10988,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 	if (!handle->rix_fate)
 		return;
 	switch (handle->fate_action) {
-	case MLX5_FLOW_FATE_DROP:
-		mlx5_drop_action_destroy(dev);
-		break;
 	case MLX5_FLOW_FATE_QUEUE:
 		mlx5_hrxq_release(dev, handle->rix_hrxq);
 		break;
@@ -10733,8 +10998,8 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 		flow_dv_port_id_action_resource_release(dev,
				handle->rix_port_id_action);
 		break;
-	case MLX5_FLOW_FATE_DEFAULT_MISS:
-		flow_dv_default_miss_resource_release(dev);
+	case MLX5_FLOW_FATE_SHARED_RSS:
+		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
 		break;
 	default:
 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
@@ -10743,6 +11008,34 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 	handle->rix_fate = 0;
 }
 
+void
+flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
+			 struct mlx5_cache_entry *entry)
+{
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_sample_resource *cache_resource =
			container_of(entry, typeof(*cache_resource), entry);
+
+	if (cache_resource->verbs_action)
+		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->verbs_action));
+	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+		if (cache_resource->default_miss)
+			claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->default_miss));
+	}
+	if (cache_resource->normal_path_tbl)
+		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
+	flow_dv_sample_sub_actions_release(dev,
				&cache_resource->sample_idx);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
			cache_resource->idx);
+	DRV_LOG(DEBUG, "sample resource %p: removed",
		(void *)cache_resource);
+}
+
 /**
 * Release a sample resource.
 *
@@ -10759,54 +11052,38 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev,
			 struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t idx = handle->dvh.rix_sample;
 	struct mlx5_flow_dv_sample_resource *cache_resource;
 
 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
-					idx);
+					handle->dvh.rix_sample);
 	if (!cache_resource)
 		return 0;
 	MLX5_ASSERT(cache_resource->verbs_action);
-	DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
-		(void *)cache_resource,
-		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
-	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
-			       __ATOMIC_RELAXED) == 0) {
-		if (cache_resource->verbs_action)
-			claim_zero(mlx5_glue->destroy_flow_action
					(cache_resource->verbs_action));
-		if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
-			if (cache_resource->default_miss)
-				claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->default_miss));
-		}
-		if (cache_resource->normal_path_tbl)
-			flow_dv_tbl_resource_release(dev,
				cache_resource->normal_path_tbl);
-	}
-	if (cache_resource->sample_idx.rix_hrxq &&
-	    !mlx5_hrxq_release(dev,
			cache_resource->sample_idx.rix_hrxq))
-		cache_resource->sample_idx.rix_hrxq = 0;
-	if (cache_resource->sample_idx.rix_tag &&
-	    !flow_dv_tag_release(dev,
			cache_resource->sample_idx.rix_tag))
-		cache_resource->sample_idx.rix_tag = 0;
-	if (cache_resource->sample_idx.cnt) {
-		flow_dv_counter_release(dev,
			cache_resource->sample_idx.cnt);
-		cache_resource->sample_idx.cnt = 0;
-	}
-	if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
			     &priv->sh->sample_action_list, idx,
			     cache_resource, next);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
-		DRV_LOG(DEBUG, "sample resource %p: removed",
			(void *)cache_resource);
-		return 0;
-	}
-	return 1;
+	return mlx5_cache_unregister(&priv->sh->sample_action_list,
				     &cache_resource->entry);
+}
+
+void
+flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
+			     struct mlx5_cache_entry *entry)
+{
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_dest_array_resource *cache_resource =
			container_of(entry, typeof(*cache_resource), entry);
+	uint32_t i = 0;
+
+	MLX5_ASSERT(cache_resource->action);
+	if (cache_resource->action)
+		claim_zero(mlx5_glue->destroy_flow_action
					(cache_resource->action));
+	for (; i < cache_resource->num_of_dest; i++)
+		flow_dv_sample_sub_actions_release(dev,
				&cache_resource->sample_idx[i]);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
			cache_resource->idx);
+	DRV_LOG(DEBUG, "destination array resource %p: removed",
		(void *)cache_resource);
 }
 
 /**
@@ -10822,59 +11099,18 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev,
 */
 static int
 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
-				     struct mlx5_flow_handle *handle)
+				    struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_dest_array_resource *cache_resource;
-	struct mlx5_flow_sub_actions_idx *mdest_act_res;
-	uint32_t idx = handle->dvh.rix_dest_array;
-	uint32_t i = 0;
+	struct mlx5_flow_dv_dest_array_resource *cache;
-	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
-					idx);
-	if (!cache_resource)
-		return 0;
-	MLX5_ASSERT(cache_resource->action);
-	DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
-		(void *)cache_resource,
-		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
-	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
-			       __ATOMIC_RELAXED) == 0) {
-		if (cache_resource->action)
-			claim_zero(mlx5_glue->destroy_flow_action
					(cache_resource->action));
-		for (; i < cache_resource->num_of_dest; i++) {
-			mdest_act_res = &cache_resource->sample_idx[i];
-			if (mdest_act_res->rix_hrxq) {
-				mlx5_hrxq_release(dev,
					mdest_act_res->rix_hrxq);
-				mdest_act_res->rix_hrxq = 0;
-			}
-			if (mdest_act_res->rix_encap_decap) {
-				flow_dv_encap_decap_resource_release(dev,
					mdest_act_res->rix_encap_decap);
-				mdest_act_res->rix_encap_decap = 0;
-			}
-			if (mdest_act_res->rix_port_id_action) {
-				flow_dv_port_id_action_resource_release(dev,
					mdest_act_res->rix_port_id_action);
-				mdest_act_res->rix_port_id_action = 0;
-			}
-			if (mdest_act_res->rix_tag) {
-				flow_dv_tag_release(dev,
					mdest_act_res->rix_tag);
-				mdest_act_res->rix_tag = 0;
-			}
-		}
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
			     &priv->sh->dest_array_list, idx,
			     cache_resource, next);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
-		DRV_LOG(DEBUG, "destination array resource %p: removed",
			(void *)cache_resource);
+	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
			       handle->dvh.rix_dest_array);
+	if (!cache)
 		return 0;
-	}
-	return 1;
+	MLX5_ASSERT(cache->action);
+	return mlx5_cache_unregister(&priv->sh->dest_array_list,
				     &cache->entry);
 }
 
 /**
@@ -10887,7 +11123,7 @@ flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
 *   Pointer to flow structure.
 */
 static void
-__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow_handle *dh;
 	uint32_t handle_idx;
@@ -10905,9 +11141,7 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
			claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
			dh->drv_flow = NULL;
 		}
-		if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
-		    dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
-		    dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
+		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
			flow_dv_fate_resource_release(dev, dh);
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
@@ -10925,20 +11159,16 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 *   Pointer to flow structure.
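 *
 * Beyond removing the rule from hardware, the destroy path now also drops
 * the flow's counter and ASO age references; the release order as
 * implemented below is:
 *
 * @code
 *	flow_dv_remove(dev, flow);
 *	if (flow->counter)
 *		flow_dv_counter_free(dev, flow->counter);
 *	if (flow->age)
 *		flow_dv_aso_age_release(dev, flow->age);
 * @endcode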
 */
 static void
-__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct rte_flow_shared_action *shared;
 	struct mlx5_flow_handle *dev_handle;
 	struct mlx5_priv *priv = dev->data->dev_private;
 
 	if (!flow)
 		return;
-	__flow_dv_remove(dev, flow);
-	shared = mlx5_flow_get_shared_rss(flow);
-	if (shared)
-		__atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
+	flow_dv_remove(dev, flow);
 	if (flow->counter) {
-		flow_dv_counter_release(dev, flow->counter);
+		flow_dv_counter_free(dev, flow->counter);
 		flow->counter = 0;
 	}
 	if (flow->meter) {
@@ -10950,6 +11180,8 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		mlx5_flow_meter_detach(fm);
 		flow->meter = 0;
 	}
+	if (flow->age)
+		flow_dv_aso_age_release(dev, flow->age);
 	while (flow->dev_handles) {
 		uint32_t tmp_idx = flow->dev_handles;
 
@@ -11039,6 +11271,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
+ * @param[in] action_idx
+ *   Shared RSS action ipool index.
 * @param[in, out] action
 *   Partially initialized shared RSS action.
 * @param[out] error
@@ -11050,24 +11284,29 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
 */
 static int
 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+			   uint32_t action_idx,
			   struct mlx5_shared_action_rss *action,
			   struct rte_flow_error *error)
 {
+	struct mlx5_flow_rss_desc rss_desc = { 0 };
 	size_t i;
 	int err;
 
+	memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
+	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
+	rss_desc.const_q = action->origin.queue;
+	rss_desc.queue_num = action->origin.queue_num;
+	/* Set non-zero value to indicate a shared RSS. */
+	rss_desc.shared_rss = action_idx;
 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
 		uint32_t hrxq_idx;
 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
 		int tunnel;
 
 		for (tunnel = 0; tunnel < 2; tunnel++) {
-			hrxq_idx = mlx5_hrxq_new(dev, action->origin.key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 hash_fields,
-						 action->origin.queue,
-						 action->origin.queue_num,
-						 tunnel, true);
+			rss_desc.tunnel = tunnel;
+			rss_desc.hash_fields = hash_fields;
+			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
 			if (!hrxq_idx) {
				rte_flow_error_set
					(error, rte_errno,
@@ -11102,56 +11341,69 @@ error_hrxq_new:
 * error only.
 *
 * @return
- *   A valid shared action handle in case of success, NULL otherwise and
+ *   A valid shared action ID in case of success, 0 otherwise and
 *   rte_errno is set.
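 *
 * The value returned here is the bare ipool index; flow_dv_action_create()
 * later combines it with the action type to build the opaque handle handed
 * back to the application:
 *
 * @code
 *	idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
 *	       MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
 * @endcode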
 */
-static struct rte_flow_shared_action *
+static uint32_t
 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
			    const struct rte_flow_shared_action_conf *conf,
			    const struct rte_flow_action_rss *rss,
			    struct rte_flow_error *error)
 {
-	struct rte_flow_shared_action *shared_action = NULL;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_action = NULL;
 	void *queue = NULL;
-	struct mlx5_shared_action_rss *shared_rss;
 	struct rte_flow_action_rss *origin;
 	const uint8_t *rss_key;
 	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
+	uint32_t idx;
 
 	RTE_SET_USED(conf);
 	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
-	shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
-				    SOCKET_ID_ANY);
+	shared_action = mlx5_ipool_zmalloc
			 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
 	if (!shared_action || !queue) {
 		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
 		goto error_rss_init;
 	}
+	if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
+		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "rss action number out of range");
+		goto error_rss_init;
+	}
-	shared_rss = &shared_action->rss;
-	shared_rss->queue = queue;
-	origin = &shared_rss->origin;
+	shared_action->queue = queue;
+	origin = &shared_action->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
 	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
 	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
-	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-	origin->key = &shared_rss->key[0];
+	memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+	origin->key = &shared_action->key[0];
 	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
-	memcpy(shared_rss->queue, rss->queue, queue_size);
-	origin->queue = shared_rss->queue;
+	memcpy(shared_action->queue, rss->queue, queue_size);
+	origin->queue = shared_action->queue;
 	origin->queue_num = rss->queue_num;
-	if (__flow_dv_action_rss_setup(dev, shared_rss, error))
+	if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
 		goto error_rss_init;
-	shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
-	return shared_action;
+	__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
+	rte_spinlock_lock(&priv->shared_act_sl);
+	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_action, next);
+	rte_spinlock_unlock(&priv->shared_act_sl);
+	return idx;
 error_rss_init:
-	mlx5_free(shared_action);
-	mlx5_free(queue);
-	return NULL;
+	if (shared_action)
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				idx);
+	if (queue)
+		mlx5_free(queue);
+	return 0;
 }
 
 /**
@@ -11160,8 +11412,8 @@ error_rss_init:
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
- * @param[in] shared_rss
- *   The shared RSS action object to be removed.
+ * @param[in] idx
+ *   The shared RSS action object ID to be removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
@@ -11170,31 +11422,39 @@ error_rss_init:
 *   0 on success, otherwise negative errno value.
 */
 static int
-__flow_dv_action_rss_release(struct rte_eth_dev *dev,
-			     struct mlx5_shared_action_rss *shared_rss,
-			     struct rte_flow_error *error)
+__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
			     struct rte_flow_error *error)
 {
-	struct rte_flow_shared_action *shared_action = NULL;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss =
+	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
 	uint32_t old_refcnt = 1;
-	int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+	int remaining;
 
-	if (remaining) {
+	if (!shared_rss)
+		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action");
+	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+	if (remaining)
 		return rte_flow_error_set(error, ETOOMANYREFS,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss hrxq has references");
-	}
-	shared_action = container_of(shared_rss,
-				     struct rte_flow_shared_action, rss);
-	if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
-					 0, 0,
-					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
					 0, 0, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED))
 		return rte_flow_error_set(error, ETOOMANYREFS,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss has references");
-	}
 	rte_free(shared_rss->queue);
+	rte_spinlock_lock(&priv->shared_act_sl);
+	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
+	rte_spinlock_unlock(&priv->shared_act_sl);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			idx);
 	return 0;
 }
 
@@ -11218,31 +11478,39 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev,
 *   rte_errno is set.
 */
 static struct rte_flow_shared_action *
-__flow_dv_action_create(struct rte_eth_dev *dev,
-			const struct rte_flow_shared_action_conf *conf,
-			const struct rte_flow_action *action,
-			struct rte_flow_error *error)
+flow_dv_action_create(struct rte_eth_dev *dev,
+		      const struct rte_flow_shared_action_conf *conf,
+		      const struct rte_flow_action *action,
+		      struct rte_flow_error *err)
 {
-	struct rte_flow_shared_action *shared_action = NULL;
-	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t idx = 0;
+	uint32_t ret = 0;
 
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_RSS:
-		shared_action = __flow_dv_action_rss_create(dev, conf,
							    action->conf,
							    error);
+		ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
+		idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
		       MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+		break;
+	case RTE_FLOW_ACTION_TYPE_AGE:
+		ret = flow_dv_translate_create_aso_age(dev, action->conf);
+		idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
		       MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+		if (ret) {
+			struct mlx5_aso_age_action *aso_age =
					      flow_aso_age_get_by_idx(dev, ret);
+
+			if (!aso_age->age_params.context)
+				aso_age->age_params.context =
							 (void *)(uintptr_t)idx;
+		}
 		break;
 	default:
-		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+		rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "action type not supported");
 		break;
 	}
-	if (shared_action) {
-		__atomic_add_fetch(&shared_action->refcnt, 1,
				   __ATOMIC_RELAXED);
-		LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
-	}
-	return shared_action;
+	return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
 }
 
 /**
@@ -11263,27 +11531,34 @@ __flow_dv_action_create(struct rte_eth_dev *dev,
 *   0 on success, otherwise negative errno value.
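 *
 * The opaque handle is decoded back into its action type and ipool index
 * before dispatching, mirroring the encoding done at creation time:
 *
 * @code
 *	uint32_t act_idx = (uint32_t)(uintptr_t)action;
 *	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
 *	uint32_t idx = act_idx &
 *		       ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
 * @endcode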
 */
 static int
-__flow_dv_action_destroy(struct rte_eth_dev *dev,
-			 struct rte_flow_shared_action *action,
-			 struct rte_flow_error *error)
+flow_dv_action_destroy(struct rte_eth_dev *dev,
+		       struct rte_flow_shared_action *action,
+		       struct rte_flow_error *error)
 {
+	uint32_t act_idx = (uint32_t)(uintptr_t)action;
+	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+	uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
 	int ret;
 
-	switch (action->type) {
-	case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
-		ret = __flow_dv_action_rss_release(dev, &action->rss, error);
-		break;
+	switch (type) {
+	case MLX5_SHARED_ACTION_TYPE_RSS:
+		return __flow_dv_action_rss_release(dev, idx, error);
+	case MLX5_SHARED_ACTION_TYPE_AGE:
+		ret = flow_dv_aso_age_release(dev, idx);
+		if (ret)
+			/*
+			 * In this case, the last flow holding the reference
+			 * will actually release the age action.
+			 */
+			DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
+				" released with references %d.", idx, ret);
+		return 0;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  NULL,
 					  "action type not supported");
 	}
-	if (ret)
-		return ret;
-	LIST_REMOVE(action, next);
-	rte_free(action);
-	return 0;
 }
 
 /**
@@ -11291,8 +11566,8 @@ __flow_dv_action_destroy(struct rte_eth_dev *dev,
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
- * @param[in] shared_rss
- *   The shared RSS action object to be updated.
+ * @param[in] idx
+ *   The shared RSS action object ID to be updated.
 * @param[in] action_conf
 *   RSS action specification used to modify *shared_rss*.
 * @param[out] error
@@ -11304,11 +11579,13 @@ __flow_dv_action_destroy(struct rte_eth_dev *dev,
 * @note: currently only support update of RSS queues.
 */
 static int
-__flow_dv_action_rss_update(struct rte_eth_dev *dev,
-			    struct mlx5_shared_action_rss *shared_rss,
+__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
 			    const struct rte_flow_action_rss *action_conf,
 			    struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss =
+	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
 	size_t i;
 	int ret;
 	void *queue = NULL;
@@ -11316,6 +11593,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev,
 	uint32_t rss_key_len;
 	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
 
+	if (!shared_rss)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "invalid shared action to update");
 	queue = mlx5_malloc(MLX5_MEM_ZERO,
 			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
 			    0, SOCKET_ID_ANY);
@@ -11338,7 +11619,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev,
 		for (tunnel = 0; tunnel < 2; tunnel++) {
 			hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-					(shared_rss, hash_fields, tunnel);
+					(dev, idx, hash_fields, tunnel);
 			MLX5_ASSERT(hrxq_idx);
 			ret = mlx5_hrxq_modify
 				(dev, hrxq_idx,
@@ -11382,22 +11663,57 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev,
 *   0 on success, otherwise negative errno value.
*/ static int -__flow_dv_action_update(struct rte_eth_dev *dev, +flow_dv_action_update(struct rte_eth_dev *dev, struct rte_flow_shared_action *action, const void *action_conf, - struct rte_flow_error *error) + struct rte_flow_error *err) { - switch (action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - return __flow_dv_action_rss_update(dev, &action->rss, - action_conf, error); + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); + + switch (type) { + case MLX5_SHARED_ACTION_TYPE_RSS: + return __flow_dv_action_rss_update(dev, idx, action_conf, err); + default: + return rte_flow_error_set(err, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "action type update not supported"); + } +} + +static int +flow_dv_action_query(struct rte_eth_dev *dev, + const struct rte_flow_shared_action *action, void *data, + struct rte_flow_error *error) +{ + struct mlx5_age_param *age_param; + struct rte_flow_query_age *resp; + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); + + switch (type) { + case MLX5_SHARED_ACTION_TYPE_AGE: + age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params; + resp = data; + resp->aged = __atomic_load_n(&age_param->state, + __ATOMIC_RELAXED) == AGE_TMOUT ? + 1 : 0; + resp->sec_since_last_hit_valid = !resp->aged; + if (resp->sec_since_last_hit_valid) + resp->sec_since_last_hit = __atomic_load_n + (&age_param->sec_since_last_hit, __ATOMIC_RELAXED); + return 0; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "action type not supported"); + "action type query not supported"); } } + /** * Query a dv flow rule for its statistics via devx. * @@ -11474,30 +11790,33 @@ flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow, void *data, struct rte_flow_error *error) { struct rte_flow_query_age *resp = data; + struct mlx5_age_param *age_param; - if (flow->counter) { - struct mlx5_age_param *age_param = - flow_dv_counter_idx_get_age(dev, flow->counter); + if (flow->age) { + struct mlx5_aso_age_action *act = + flow_aso_age_get_by_idx(dev, flow->age); + + age_param = &act->age_params; + } else if (flow->counter) { + age_param = flow_dv_counter_idx_get_age(dev, flow->counter); if (!age_param || !age_param->timeout) return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot read age data"); - resp->aged = __atomic_load_n(&age_param->state, - __ATOMIC_RELAXED) == - AGE_TMOUT ? 1 : 0; - resp->sec_since_last_hit_valid = !resp->aged; - if (resp->sec_since_last_hit_valid) - resp->sec_since_last_hit = - __atomic_load_n(&age_param->sec_since_last_hit, - __ATOMIC_RELAXED); - return 0; - } - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "age data not available"); + } else { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "age data not available"); + } + resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) == + AGE_TMOUT ? 
1 : 0; + resp->sec_since_last_hit_valid = !resp->aged; + if (resp->sec_since_last_hit_valid) + resp->sec_since_last_hit = __atomic_load_n + (&age_param->sec_since_last_hit, __ATOMIC_RELAXED); + return 0; } /** @@ -11573,9 +11892,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->egress.any_matcher)); if (mtd->egress.tbl) - flow_dv_tbl_resource_release(dev, mtd->egress.tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl); if (mtd->egress.sfx_tbl) - flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl); if (mtd->ingress.color_matcher) claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->ingress.color_matcher)); @@ -11583,9 +11902,10 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->ingress.any_matcher)); if (mtd->ingress.tbl) - flow_dv_tbl_resource_release(dev, mtd->ingress.tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl); if (mtd->ingress.sfx_tbl) - flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), + mtd->ingress.sfx_tbl); if (mtd->transfer.color_matcher) claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->transfer.color_matcher)); @@ -11593,9 +11913,10 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->transfer.any_matcher)); if (mtd->transfer.tbl) - flow_dv_tbl_resource_release(dev, mtd->transfer.tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl); if (mtd->transfer.sfx_tbl) - flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), + mtd->transfer.sfx_tbl); if (mtd->drop_actn) claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn)); mlx5_free(mtd); @@ -11657,7 +11978,7 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, /* Create the meter table with METER level. 
*/ dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER, egress, transfer, false, NULL, 0, - &error); + 0, &error); if (!dtb->tbl) { DRV_LOG(ERR, "Failed to create meter policer table."); return -1; @@ -11666,7 +11987,7 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, dtb->sfx_tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_SUFFIX, egress, transfer, false, NULL, 0, - &error); + 0, &error); if (!dtb->sfx_tbl) { DRV_LOG(ERR, "Failed to create meter suffix table."); return -1; @@ -11979,18 +12300,15 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev) .match_mask = (void *)&mask, }; void *actions[2] = { 0 }; - struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL; + struct mlx5_flow_tbl_resource *tbl = NULL; struct mlx5_devx_obj *dcs = NULL; void *matcher = NULL; void *flow = NULL; - int i, ret = -1; + int ret = -1; - tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, NULL); + tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL); if (!tbl) goto err; - dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, NULL); - if (!dest_tbl) - goto err; dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); if (!dcs) goto err; @@ -11998,10 +12316,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev) &actions[0]); if (ret) goto err; - ret = mlx5_flow_os_create_flow_action_dest_flow_tbl - (dest_tbl->obj, &actions[1]); - if (ret) - goto err; + actions[1] = priv->drop_queue.hrxq->action; dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf); ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj, &matcher); @@ -12030,17 +12345,12 @@ err: "support detection"); ret = 0; } - for (i = 0; i < 2; i++) { - if (actions[i]) - claim_zero(mlx5_flow_os_destroy_flow_action - (actions[i])); - } + if (actions[0]) + claim_zero(mlx5_flow_os_destroy_flow_action(actions[0])); if (matcher) claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher)); if (tbl) - flow_dv_tbl_resource_release(dev, tbl); - if (dest_tbl) - flow_dv_tbl_resource_release(dev, dest_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); if (dcs) claim_zero(mlx5_devx_cmd_destroy(dcs)); return ret; @@ -12118,16 +12428,24 @@ flow_get_aged_flows(struct rte_eth_dev *dev, struct mlx5_age_info *age_info; struct mlx5_age_param *age_param; struct mlx5_flow_counter *counter; + struct mlx5_aso_age_action *act; int nb_flows = 0; if (nb_contexts && !context) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "Should assign at least one flow or" - " context to get if nb_contexts != 0"); + NULL, "empty context"); age_info = GET_PORT_AGE_INFO(priv); rte_spinlock_lock(&age_info->aged_sl); + LIST_FOREACH(act, &age_info->aged_aso, next) { + nb_flows++; + if (nb_contexts) { + context[nb_flows - 1] = + act->age_params.context; + if (!(--nb_contexts)) + break; + } + } TAILQ_FOREACH(counter, &age_info->aged_counters, next) { nb_flows++; if (nb_contexts) { @@ -12142,86 +12460,13 @@ flow_get_aged_flows(struct rte_eth_dev *dev, return nb_flows; } -/* - * Mutex-protected thunk to lock-free __flow_dv_translate(). 
- */
-static int
-flow_dv_translate(struct rte_eth_dev *dev,
-		  struct mlx5_flow *dev_flow,
-		  const struct rte_flow_attr *attr,
-		  const struct rte_flow_item items[],
-		  const struct rte_flow_action actions[],
-		  struct rte_flow_error *error)
-{
-	int ret;
-
-	flow_dv_shared_lock(dev);
-	ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
-	flow_dv_shared_unlock(dev);
-	return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_apply().
- */
-static int
-flow_dv_apply(struct rte_eth_dev *dev,
-	      struct rte_flow *flow,
-	      struct rte_flow_error *error)
-{
-	int ret;
-
-	flow_dv_shared_lock(dev);
-	ret = __flow_dv_apply(dev, flow, error);
-	flow_dv_shared_unlock(dev);
-	return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_remove().
- */
-static void
-flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
-	flow_dv_shared_lock(dev);
-	__flow_dv_remove(dev, flow);
-	flow_dv_shared_unlock(dev);
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_destroy().
- */
-static void
-flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
-	flow_dv_shared_lock(dev);
-	__flow_dv_destroy(dev, flow);
-	flow_dv_shared_unlock(dev);
-}
-
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
- */
+/*
+ * Wrapper around lock-free flow_dv_counter_alloc(); the mutex
+ * protection is no longer needed.
+ */
 static uint32_t
 flow_dv_counter_allocate(struct rte_eth_dev *dev)
 {
-	uint32_t cnt;
-
-	flow_dv_shared_lock(dev);
-	cnt = flow_dv_counter_alloc(dev, 0);
-	flow_dv_shared_unlock(dev);
-	return cnt;
-}
-
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_release().
- */
-static void
-flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
-{
-	flow_dv_shared_lock(dev);
-	flow_dv_counter_release(dev, cnt);
-	flow_dv_shared_unlock(dev);
+	return flow_dv_counter_alloc(dev, 0);
 }
 
 /**
@@ -12245,69 +12490,52 @@ static int
 flow_dv_action_validate(struct rte_eth_dev *dev,
 			const struct rte_flow_shared_action_conf *conf,
 			const struct rte_flow_action *action,
-			struct rte_flow_error *error)
+			struct rte_flow_error *err)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+
 	RTE_SET_USED(conf);
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_RSS:
-		return mlx5_validate_action_rss(dev, action, error);
+		return mlx5_validate_action_rss(dev, action, err);
+	case RTE_FLOW_ACTION_TYPE_AGE:
+		if (!priv->sh->aso_age_mng)
+			return rte_flow_error_set(err, ENOTSUP,
+						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+						NULL,
+					     "shared age action not supported");
+		return flow_dv_validate_action_age(0, action, dev, err);
 	default:
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(err, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  NULL,
 					  "action type not supported");
 	}
 }
 
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_create().
- */
-static struct rte_flow_shared_action *
-flow_dv_action_create(struct rte_eth_dev *dev,
-		      const struct rte_flow_shared_action_conf *conf,
-		      const struct rte_flow_action *action,
-		      struct rte_flow_error *error)
-{
-	struct rte_flow_shared_action *shared_action = NULL;
-
-	flow_dv_shared_lock(dev);
-	shared_action = __flow_dv_action_create(dev, conf, action, error);
-	flow_dv_shared_unlock(dev);
-	return shared_action;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
- */ -static int -flow_dv_action_destroy(struct rte_eth_dev *dev, - struct rte_flow_shared_action *action, - struct rte_flow_error *error) -{ - int ret; - - flow_dv_shared_lock(dev); - ret = __flow_dv_action_destroy(dev, action, error); - flow_dv_shared_unlock(dev); - return ret; -} - -/* - * Mutex-protected thunk to lock-free __flow_dv_action_update(). - */ static int -flow_dv_action_update(struct rte_eth_dev *dev, - struct rte_flow_shared_action *action, - const void *action_conf, - struct rte_flow_error *error) +flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags) { - int ret; + struct mlx5_priv *priv = dev->data->dev_private; + int ret = 0; - flow_dv_shared_lock(dev); - ret = __flow_dv_action_update(dev, action, action_conf, - error); - flow_dv_shared_unlock(dev); - return ret; + if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) { + ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, + flags); + if (ret != 0) + return ret; + } + if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) { + ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags); + if (ret != 0) + return ret; + } + if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) { + ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags); + if (ret != 0) + return ret; + } + return 0; } const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { @@ -12330,6 +12558,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { .action_create = flow_dv_action_create, .action_destroy = flow_dv_action_destroy, .action_update = flow_dv_action_update, + .action_query = flow_dv_action_query, + .sync_domain = flow_dv_sync_domain, }; #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
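
Note on the handle layout used in this patch: flow_dv_action_create() returns the 32-bit value (type << MLX5_SHARED_ACTION_TYPE_OFFSET) | index cast to a pointer, and destroy/update/query recover both fields again by shift and mask. A minimal standalone sketch of that round trip follows; the offset value and all names below are illustrative assumptions, not the driver's definitions.

	#include <assert.h>
	#include <stdint.h>

	/* Assumed width of the index field; the driver's own
	 * MLX5_SHARED_ACTION_TYPE_OFFSET is authoritative. */
	#define ACTION_TYPE_OFFSET 29

	enum action_type { ACTION_TYPE_RSS = 0, ACTION_TYPE_AGE = 1 };

	/* Pack the action type above the offset and the pool index
	 * below it, mirroring how the patch builds its handle. */
	static inline uint32_t
	encode_handle(enum action_type type, uint32_t idx)
	{
		assert(idx != 0 && idx < (1u << ACTION_TYPE_OFFSET));
		return ((uint32_t)type << ACTION_TYPE_OFFSET) | idx;
	}

	/* Unpack, as the destroy/update/query paths do. */
	static inline enum action_type
	decode_handle(uint32_t handle, uint32_t *idx)
	{
		*idx = handle & ((1u << ACTION_TYPE_OFFSET) - 1);
		return (enum action_type)(handle >> ACTION_TYPE_OFFSET);
	}

The range check in __flow_dv_action_rss_create() exists precisely so that an ipool index can never spill into the type bits of such a handle.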
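
The RSS objects themselves live in an indexed pool and are chained through indices rather than pointers (mlx5_ipool_zmalloc/mlx5_ipool_get plus ILIST_INSERT/ILIST_REMOVE), which is what makes a 32-bit handle sufficient. A toy model of the idea, under the assumption of a fixed-size pool with index 0 reserved as "invalid"; every name here is hypothetical.

	#include <stdint.h>
	#include <stddef.h>

	#define POOL_CAP 64u

	struct node {
		uint32_t next; /* index of the next list element, 0 = end */
		int payload;
	};

	/* Slot 0 is reserved so that index 0 can mean "no object",
	 * as a failed lookup does in the patch. */
	static struct node pool[POOL_CAP + 1];

	static struct node *
	pool_get(uint32_t idx)
	{
		return (idx >= 1 && idx <= POOL_CAP) ? &pool[idx] : NULL;
	}

	/* Link an element at the head of an index-based list. */
	static void
	ilist_insert(uint32_t *head, uint32_t idx)
	{
		pool[idx].next = *head;
		*head = idx;
	}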
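
__flow_dv_action_rss_release() refuses to destroy an object that is still referenced by attempting a single atomic compare-and-swap of the reference counter from 1 to 0: only the thread that wins the exchange may free the object. A sketch of the same pattern with the GCC atomic builtins used by the patch, on a hypothetical structure:

	#include <stdint.h>

	struct shared_obj {
		uint32_t refcnt;
		/* ... payload ... */
	};

	/* Try to move refcnt 1 -> 0 in one atomic step. Returns nonzero
	 * if the caller removed the last reference and may free the
	 * object; a concurrent reference holder makes the exchange fail
	 * and the release is refused (ETOOMANYREFS in the patch). */
	static int
	try_release(struct shared_obj *obj)
	{
		uint32_t expected = 1;

		return __atomic_compare_exchange_n(&obj->refcnt, &expected, 0,
						   0, /* strong variant */
						   __ATOMIC_ACQUIRE,
						   __ATOMIC_RELAXED);
	}

The acquire ordering on success ensures all prior uses of the object are visible before it is torn down.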
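
For completeness, a hedged caller-side sketch of consuming the rte_flow_query_age result that flow_dv_action_query() fills in. It assumes the rte_flow_shared_action_query() API of this DPDK generation and trims real error handling.

	#include <stdio.h>
	#include <string.h>
	#include <rte_flow.h>

	/* 'port_id' and 'action' are assumed to come from an earlier
	 * rte_flow_shared_action_create() call on an AGE action. */
	static void
	report_age(uint16_t port_id, struct rte_flow_shared_action *action)
	{
		struct rte_flow_query_age resp;
		struct rte_flow_error error;

		memset(&resp, 0, sizeof(resp));
		if (rte_flow_shared_action_query(port_id, action, &resp,
						 &error))
			return;
		if (resp.aged)
			printf("action aged out\n");
		else if (resp.sec_since_last_hit_valid)
			printf("%u seconds since last hit\n",
			       (unsigned int)resp.sec_since_last_hit);
	}

As in the driver code above, 'aged' and 'sec_since_last_hit_valid' are mutually exclusive: the seconds counter is only meaningful while the action has not yet timed out.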