X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=bfcba45594269c5301151482f6bcaf52721a2a5c;hb=4ae8825c5085;hp=53399800ff6f7da55dba62362789fea3bfd3d498;hpb=7301d1923a7103974077577520511d15420c7407;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 53399800ff..bfcba45594 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -33,6 +33,7 @@ #include "mlx5_flow.h" #include "mlx5_flow_os.h" #include "mlx5_rxtx.h" +#include "rte_pmd_mlx5.h" #ifdef HAVE_IBV_FLOW_DV_SUPPORT @@ -76,6 +77,14 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev, static int flow_dv_default_miss_resource_release(struct rte_eth_dev *dev); +static int +flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, + uint32_t encap_decap_idx); + +static int +flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, + uint32_t port_id); + /** * Initialize flow attributes structure according to flow items' types. * @@ -285,7 +294,7 @@ flow_dv_shared_lock(struct rte_eth_dev *dev) struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - if (sh->dv_refcnt > 1) { + if (sh->refcnt > 1) { int ret; ret = pthread_mutex_lock(&sh->dv_mutex); @@ -300,7 +309,7 @@ flow_dv_shared_unlock(struct rte_eth_dev *dev) struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - if (sh->dv_refcnt > 1) { + if (sh->refcnt > 1) { int ret; ret = pthread_mutex_unlock(&sh->dv_mutex); @@ -945,7 +954,7 @@ flow_dv_convert_action_modify_tcp_ack } static enum mlx5_modification_field reg_to_field[] = { - [REG_NONE] = MLX5_MODI_OUT_NONE, + [REG_NON] = MLX5_MODI_OUT_NONE, [REG_A] = MLX5_MODI_META_DATA_REG_A, [REG_B] = MLX5_MODI_META_DATA_REG_B, [REG_C_0] = MLX5_MODI_META_REG_C_0, @@ -985,7 +994,7 @@ flow_dv_convert_action_set_reg return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many items to modify"); - MLX5_ASSERT(conf->id != REG_NONE); + MLX5_ASSERT(conf->id != REG_NON); MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field)); actions[i] = (struct mlx5_modification_cmd) { .action_type = MLX5_MODIFICATION_TYPE_SET, @@ -1035,7 +1044,7 @@ flow_dv_convert_action_set_tag ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error); if (ret < 0) return ret; - MLX5_ASSERT(ret != REG_NONE); + MLX5_ASSERT(ret != REG_NON); MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field)); reg_type = reg_to_field[ret]; MLX5_ASSERT(reg_type > 0); @@ -1418,7 +1427,7 @@ flow_dv_validate_item_mark(struct rte_eth_dev *dev, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_mark), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; return 0; @@ -1494,7 +1503,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_meta), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); return ret; } @@ -1547,7 +1556,7 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_tag), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; if (mask->index != 0xff) @@ -1558,7 +1567,7 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error); if (ret < 0) return ret; - 
MLX5_ASSERT(ret != REG_NONE); + MLX5_ASSERT(ret != REG_NON); return 0; } @@ -1618,7 +1627,7 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev, (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_port_id_mask, sizeof(struct rte_flow_item_port_id), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret) return ret; if (!spec) @@ -1668,6 +1677,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item, const struct rte_flow_item_vlan nic_mask = { .tci = RTE_BE16(UINT16_MAX), .inner_type = RTE_BE16(UINT16_MAX), + .has_more_vlan = 1, }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int ret; @@ -1691,7 +1701,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_vlan), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret) return ret; if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { @@ -1778,11 +1788,240 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ITEM, item, "Match is supported for GTP" " flags only"); - return mlx5_flow_item_acceptable - (item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_gtp), - error); + return mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_gtp), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); +} + +/** + * Validate IPV4 item. + * Use existing validation function mlx5_flow_validate_item_ipv4(), and + * add specific validation of fragment_offset field, + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_ipv4(const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + struct rte_flow_error *error) +{ + int ret; + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *last = item->last; + const struct rte_flow_item_ipv4 *mask = item->mask; + rte_be16_t fragment_offset_spec = 0; + rte_be16_t fragment_offset_last = 0; + const struct rte_flow_item_ipv4 nic_ipv4_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .fragment_offset = RTE_BE16(0xffff), + .next_proto_id = 0xff, + .time_to_live = 0xff, + }, + }; + + ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item, + ether_type, &nic_ipv4_mask, + MLX5_ITEM_RANGE_ACCEPTED, error); + if (ret < 0) + return ret; + if (spec && mask) + fragment_offset_spec = spec->hdr.fragment_offset & + mask->hdr.fragment_offset; + if (!fragment_offset_spec) + return 0; + /* + * spec and mask are valid, enforce using full mask to make sure the + * complete value is used correctly. + */ + if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) + != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, "must use full mask for" + " fragment_offset"); + /* + * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0, + * indicating this is 1st fragment of fragmented packet. + * This is not yet supported in MLX5, return appropriate error message. 
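+ * As an illustration (hypothetical application usage, not part of
+ * this patch), the only spec/last combination the checks below
+ * accept is the one matching any fragmented packet:
+ *   spec.hdr.fragment_offset = RTE_BE16(1);
+ *   last.hdr.fragment_offset = RTE_BE16(0x3fff);
+ *   mask.hdr.fragment_offset = RTE_BE16(0x3fff);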
+ */ + if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "match on first fragment not " + "supported"); + if (fragment_offset_spec && !last) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "specified value not supported"); + /* spec and last are valid, validate the specified range. */ + fragment_offset_last = last->hdr.fragment_offset & + mask->hdr.fragment_offset; + /* + * Match on fragment_offset spec 0x2001 and last 0x3fff + * means MF is 1 and frag-offset is > 0. + * This packet is fragment 2nd and onward, excluding last. + * This is not yet supported in MLX5, return appropriate + * error message. + */ + if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) && + fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on following " + "fragments not supported"); + /* + * Match on fragment_offset spec 0x0001 and last 0x1fff + * means MF is 0 and frag-offset is > 0. + * This packet is last fragment of fragmented packet. + * This is not yet supported in MLX5, return appropriate + * error message. + */ + if (fragment_offset_spec == RTE_BE16(1) && + fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on last " + "fragment not supported"); + /* + * Match on fragment_offset spec 0x0001 and last 0x3fff + * means MF and/or frag-offset is not 0. + * This is a fragmented packet. + * Other range values are invalid and rejected. + */ + if (!(fragment_offset_spec == RTE_BE16(1) && + fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, last, + "specified range not supported"); + return 0; +} + +/** + * Validate IPV6 fragment extension item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6_frag_ext *spec = item->spec; + const struct rte_flow_item_ipv6_frag_ext *last = item->last; + const struct rte_flow_item_ipv6_frag_ext *mask = item->mask; + rte_be16_t frag_data_spec = 0; + rte_be16_t frag_data_last = 0; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret = 0; + struct rte_flow_item_ipv6_frag_ext nic_mask = { + .hdr = { + .next_header = 0xff, + .frag_data = RTE_BE16(0xffff), + }, + }; + + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "ipv6 fragment extension item cannot " + "follow L4 item."); + if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || + (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "ipv6 fragment extension item must " + "follow ipv6 item"); + if (spec && mask) + frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data; + if (!frag_data_spec) + return 0; + /* + * spec and mask are valid, enforce using full mask to make sure the + * complete value is used correctly. + */ + if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) != + RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, "must use full mask for" + " frag_data"); + /* + * Match on frag_data 0x00001 means M is 1 and frag-offset is 0. + * This is 1st fragment of fragmented packet. + */ + if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "match on first fragment not " + "supported"); + if (frag_data_spec && !last) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "specified value not supported"); + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv6_frag_ext), + MLX5_ITEM_RANGE_ACCEPTED, error); + if (ret) + return ret; + /* spec and last are valid, validate the specified range. */ + frag_data_last = last->hdr.frag_data & mask->hdr.frag_data; + /* + * Match on frag_data spec 0x0009 and last 0xfff9 + * means M is 1 and frag-offset is > 0. + * This packet is fragment 2nd and onward, excluding last. + * This is not yet supported in MLX5, return appropriate + * error message. + */ + if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN | + RTE_IPV6_EHDR_MF_MASK) && + frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on following " + "fragments not supported"); + /* + * Match on frag_data spec 0x0008 and last 0xfff8 + * means M is 0 and frag-offset is > 0. + * This packet is last fragment of fragmented packet. + * This is not yet supported in MLX5, return appropriate + * error message. + */ + if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) && + frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on last " + "fragment not supported"); + /* Other range values are invalid and rejected. */ + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, last, + "specified range not supported"); } /** @@ -2545,6 +2784,39 @@ flow_dv_validate_action_raw_encap_decap return 0; } +/** + * Match encap_decap resource. + * + * @param entry + * Pointer to exist resource entry object. + * @param ctx + * Pointer to new encap_decap resource. + * + * @return + * 0 on matching, -1 otherwise. 
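+ *
+ * Sketch of the intended use (assuming the mlx5_hlist helpers used in
+ * this file): the 64-bit entry key is a checksum and may collide, so
+ * mlx5_hlist_lookup_ex()/mlx5_hlist_insert_ex() call this callback to
+ * compare candidates field by field and memcmp() the reformat buffer.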
+ */ +static int +flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx) +{ + struct mlx5_flow_dv_encap_decap_resource *resource; + struct mlx5_flow_dv_encap_decap_resource *cache_resource; + + resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx; + cache_resource = container_of(entry, + struct mlx5_flow_dv_encap_decap_resource, + entry); + if (resource->entry.key == cache_resource->entry.key && + resource->reformat_type == cache_resource->reformat_type && + resource->ft_type == cache_resource->ft_type && + resource->flags == cache_resource->flags && + resource->size == cache_resource->size && + !memcmp((const void *)resource->buf, + (const void *)cache_resource->buf, + resource->size)) + return 0; + return -1; +} + /** * Find existing encap/decap resource or create and register a new one. * @@ -2571,7 +2843,16 @@ flow_dv_encap_decap_resource_register struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_encap_decap_resource *cache_resource; struct mlx5dv_dr_domain *domain; - uint32_t idx = 0; + struct mlx5_hlist_entry *entry; + union mlx5_flow_encap_decap_key encap_decap_key = { + { + .ft_type = resource->ft_type, + .refmt_type = resource->reformat_type, + .buf_size = resource->size, + .table_level = !!dev_flow->dv.group, + .cksum = 0, + } + }; int ret; resource->flags = dev_flow->dv.group ? 0 : 1; @@ -2581,24 +2862,25 @@ flow_dv_encap_decap_resource_register domain = sh->rx_domain; else domain = sh->tx_domain; + encap_decap_key.cksum = __rte_raw_cksum(resource->buf, + resource->size, 0); + resource->entry.key = encap_decap_key.v64; /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx, - cache_resource, next) { - if (resource->reformat_type == cache_resource->reformat_type && - resource->ft_type == cache_resource->ft_type && - resource->flags == cache_resource->flags && - resource->size == cache_resource->size && - !memcmp((const void *)resource->buf, - (const void *)cache_resource->buf, - resource->size)) { - DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.rix_encap_decap = idx; - dev_flow->dv.encap_decap = cache_resource; - return 0; - } + entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key, + flow_dv_encap_decap_resource_match, + (void *)resource); + if (entry) { + cache_resource = container_of(entry, + struct mlx5_flow_dv_encap_decap_resource, entry); + DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++", + (void *)cache_resource, + __atomic_load_n(&cache_resource->refcnt, + __ATOMIC_RELAXED)); + __atomic_fetch_add(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED); + dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx; + dev_flow->dv.encap_decap = cache_resource; + return 0; } /* Register new encap/decap resource. 
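 * If an equal entry was inserted meanwhile (e.g. by a racing thread),
 * mlx5_hlist_insert_ex() below fails and EEXIST is returned.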
*/ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], @@ -2608,6 +2890,7 @@ flow_dv_encap_decap_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); *cache_resource = *resource; + cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap; ret = mlx5_flow_os_create_flow_action_packet_reformat (sh->ctx, domain, cache_resource, &cache_resource->action); @@ -2617,15 +2900,22 @@ flow_dv_encap_decap_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps, - dev_flow->handle->dvh.rix_encap_decap, cache_resource, - next); + __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); + if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry, + flow_dv_encap_decap_resource_match, + (void *)cache_resource)) { + claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], + cache_resource->idx); + return rte_flow_error_set(error, EEXIST, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "action exist"); + } dev_flow->dv.encap_decap = cache_resource; DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); return 0; } @@ -2656,7 +2946,7 @@ flow_dv_jump_tbl_resource_register int cnt, ret; MLX5_ASSERT(tbl); - cnt = rte_atomic32_read(&tbl_data->jump.refcnt); + cnt = __atomic_load_n(&tbl_data->jump.refcnt, __ATOMIC_ACQUIRE); if (!cnt) { ret = mlx5_flow_os_create_flow_action_dest_flow_tbl (tbl->obj, &tbl_data->jump.action); @@ -2673,7 +2963,7 @@ flow_dv_jump_tbl_resource_register DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++", (void *)&tbl_data->jump, cnt); } - rte_atomic32_inc(&tbl_data->jump.refcnt); + __atomic_fetch_add(&tbl_data->jump.refcnt, 1, __ATOMIC_RELEASE); dev_flow->handle->rix_jump = tbl_data->idx; dev_flow->dv.jump = &tbl_data->jump; return 0; @@ -2698,7 +2988,7 @@ flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_default_miss_resource *cache_resource = &sh->default_miss; - int cnt = rte_atomic32_read(&cache_resource->refcnt); + int cnt = __atomic_load_n(&cache_resource->refcnt, __ATOMIC_ACQUIRE); if (!cnt) { MLX5_ASSERT(cache_resource->action); @@ -2711,7 +3001,7 @@ flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++", (void *)cache_resource->action, cnt); } - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_fetch_add(&cache_resource->refcnt, 1, __ATOMIC_RELEASE); return 0; } @@ -2750,8 +3040,10 @@ flow_dv_port_id_action_resource_register DRV_LOG(DEBUG, "port id action resource resource %p: " "refcnt %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_load_n(&cache_resource->refcnt, + __ATOMIC_RELAXED)); + __atomic_fetch_add(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED); dev_flow->handle->rix_port_id_action = idx; dev_flow->dv.port_id_action = cache_resource; return 0; @@ -2774,15 +3066,14 @@ flow_dv_port_id_action_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); + 
__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list, dev_flow->handle->rix_port_id_action, cache_resource, next); dev_flow->dv.port_id_action = cache_resource; DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); return 0; } @@ -2823,8 +3114,10 @@ flow_dv_push_vlan_action_resource_register DRV_LOG(DEBUG, "push-VLAN action resource resource %p: " "refcnt %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_load_n(&cache_resource->refcnt, + __ATOMIC_RELAXED)); + __atomic_fetch_add(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED); dev_flow->handle->dvh.rix_push_vlan = idx; dev_flow->dv.push_vlan_res = cache_resource; return 0; @@ -2853,8 +3146,7 @@ flow_dv_push_vlan_action_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &sh->push_vlan_action_list, dev_flow->handle->dvh.rix_push_vlan, @@ -2862,7 +3154,7 @@ flow_dv_push_vlan_action_resource_register dev_flow->dv.push_vlan_res = cache_resource; DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); return 0; } /** @@ -3320,6 +3612,8 @@ flow_dv_create_action_push_vlan(struct rte_eth_dev *dev, (dev, &res, dev_flow, error); } +static int fdb_mirror; + /** * Validate the modify-header actions. * @@ -3347,6 +3641,12 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have encap action before" " modify action"); + if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't support sample action before" + " modify action for E-Switch" + " mirroring"); return 0; } @@ -3652,14 +3952,21 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags, * 0 on success, a negative errno value otherwise and rte_errno is set. 
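 *
 * For illustration only (hypothetical caller, not from this patch):
 * @code
 * struct rte_flow_action_jump jump = { .group = 2 };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 * The target group is translated with mlx5_flow_group_to_table()
 * before the checks below are applied.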
*/ static int -flow_dv_validate_action_jump(const struct rte_flow_action *action, +flow_dv_validate_action_jump(struct rte_eth_dev *dev, + const struct mlx5_flow_tunnel *tunnel, + const struct rte_flow_action *action, uint64_t action_flags, const struct rte_flow_attr *attributes, bool external, struct rte_flow_error *error) { uint32_t target_group, table; int ret = 0; - + struct flow_grp_info grp_info = { + .external = !!external, + .transfer = !!attributes->transfer, + .fdb_def_rule = 1, + .std_tbl_fix = 0 + }; if (action_flags & (MLX5_FLOW_FATE_ACTIONS | MLX5_FLOW_FATE_ESWITCH_ACTIONS)) return rte_flow_error_set(error, EINVAL, @@ -3670,17 +3977,25 @@ flow_dv_validate_action_jump(const struct rte_flow_action *action, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "jump with meter not support"); + if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "E-Switch mirroring can't support" + " Sample action and jump action in" + " same flow now"); if (!action->conf) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "action configuration not set"); target_group = ((const struct rte_flow_action_jump *)action->conf)->group; - ret = mlx5_flow_group_to_table(attributes, external, target_group, - true, &table, error); + ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table, + grp_info, error); if (ret) return ret; - if (attributes->group == target_group) + if (attributes->group == target_group && + !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | + MLX5_FLOW_ACTION_TUNNEL_MATCH))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "target group must be other than" @@ -3869,7 +4184,7 @@ flow_dv_validate_action_age(uint64_t action_flags, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_age *age = action->conf; - if (!priv->config.devx || priv->counter_fallback) + if (!priv->config.devx || priv->sh->cmng.counter_fallback) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -3878,14 +4193,14 @@ flow_dv_validate_action_age(uint64_t action_flags, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "configuration cannot be null"); - if (age->timeout >= UINT16_MAX / 2 / 10) - return rte_flow_error_set(error, ENOTSUP, + if (!(age->timeout)) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, - "Max age time: 3275 seconds"); + "invalid timeout value 0"); if (action_flags & MLX5_FLOW_ACTION_AGE) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "Duplicate age ctions set"); + "duplicate age actions set"); return 0; } @@ -3958,148 +4273,380 @@ flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags, } /** - * Find existing modify-header resource or create and register a new one. + * Match modify-header resource. * - * @param dev[in, out] - * Pointer to rte_eth_dev structure. - * @param[in, out] resource - * Pointer to modify-header resource. - * @parm[in, out] dev_flow - * Pointer to the dev_flow. - * @param[out] error - * pointer to error structure. + * @param entry + * Pointer to exist resource entry object. + * @param ctx + * Pointer to new modify-header resource. * * @return - * 0 on success otherwise -errno and errno is set. + * 0 on matching, -1 otherwise. 
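+ *
+ * Like the encap/decap variant above, this callback resolves checksum
+ * collisions for mlx5_hlist_lookup_ex()/mlx5_hlist_insert_ex() by
+ * comparing the modification commands with memcmp().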
*/ static int -flow_dv_modify_hdr_resource_register - (struct rte_eth_dev *dev, - struct mlx5_flow_dv_modify_hdr_resource *resource, - struct mlx5_flow *dev_flow, - struct rte_flow_error *error) +flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_flow_dv_modify_hdr_resource *resource; struct mlx5_flow_dv_modify_hdr_resource *cache_resource; - struct mlx5dv_dr_domain *ns; uint32_t actions_len; - int ret; - resource->flags = dev_flow->dv.group ? 0 : - MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; - if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, - resource->flags)) - return rte_flow_error_set(error, EOVERFLOW, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "too many modify header items"); - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - ns = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) - ns = sh->tx_domain; - else - ns = sh->rx_domain; - /* Lookup a matching resource from cache. */ + resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx; + cache_resource = container_of(entry, + struct mlx5_flow_dv_modify_hdr_resource, + entry); actions_len = resource->actions_num * sizeof(resource->actions[0]); - LIST_FOREACH(cache_resource, &sh->modify_cmds, next) { - if (resource->ft_type == cache_resource->ft_type && - resource->actions_num == cache_resource->actions_num && - resource->flags == cache_resource->flags && - !memcmp((const void *)resource->actions, - (const void *)cache_resource->actions, - actions_len)) { - DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.modify_hdr = cache_resource; - return 0; - } - } - /* Register new modify-header resource. */ - cache_resource = mlx5_malloc(MLX5_MEM_ZERO, - sizeof(*cache_resource) + actions_len, 0, - SOCKET_ID_ANY); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - rte_memcpy(cache_resource->actions, resource->actions, actions_len); - ret = mlx5_flow_os_create_flow_action_modify_header - (sh->ctx, ns, cache_resource, - actions_len, &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); - dev_flow->handle->dvh.modify_hdr = cache_resource; - DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - return 0; + if (resource->entry.key == cache_resource->entry.key && + resource->ft_type == cache_resource->ft_type && + resource->actions_num == cache_resource->actions_num && + resource->flags == cache_resource->flags && + !memcmp((const void *)resource->actions, + (const void *)cache_resource->actions, + actions_len)) + return 0; + return -1; } /** - * Get DV flow counter by index. + * Validate the sample action. * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the sample action. * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] idx - * mlx5 flow counter index in the container. 
- * @param[out] ppool - * mlx5 flow counter pool in the container, + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. * * @return - * Pointer to the counter, NULL otherwise. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static struct mlx5_flow_counter * -flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, - uint32_t idx, - struct mlx5_flow_counter_pool **ppool) +static int +flow_dv_validate_action_sample(uint64_t action_flags, + const struct rte_flow_action *action, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_pools_container *cont; - struct mlx5_flow_counter_pool *pool; - uint32_t batch = 0, age = 0; - - idx--; - age = MLX_CNT_IS_AGE(idx); - idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx; - if (idx >= MLX5_CNT_BATCH_OFFSET) { - idx -= MLX5_CNT_BATCH_OFFSET; - batch = 1; - } - cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); - MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n); - pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL]; - MLX5_ASSERT(pool); - if (ppool) - *ppool = pool; - return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); -} - -/** - * Check the devx counter belongs to the pool. - * - * @param[in] pool - * Pointer to the counter pool. - * @param[in] id - * The counter devx ID. - * - * @return - * True if counter belongs to the pool, false otherwise. - */ -static bool -flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) -{ - int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * - MLX5_COUNTERS_PER_POOL; + struct mlx5_dev_config *dev_conf = &priv->config; + const struct rte_flow_action_sample *sample = action->conf; + const struct rte_flow_action *act; + uint64_t sub_action_flags = 0; + uint16_t queue_index = 0xFFFF; + int actions_n = 0; + int ret; + fdb_mirror = 0; + + if (!sample) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "configuration cannot be NULL"); + if (sample->ratio == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "ratio value starts from 1"); + if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "sample action not supported"); + if (action_flags & MLX5_FLOW_ACTION_SAMPLE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Multiple sample actions not " + "supported"); + if (action_flags & MLX5_FLOW_ACTION_METER) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, meter should " + "be after sample action"); + if (action_flags & MLX5_FLOW_ACTION_JUMP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, jump should " + "be after sample action"); + act = sample->actions; + for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) { + if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "too many actions"); + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_validate_action_queue(act, + sub_action_flags, + dev, + attr, error); + if (ret < 0) + return ret; + queue_index = ((const struct rte_flow_action_queue *) + (act->conf))->index; + sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; + ++actions_n; + break; + case 
RTE_FLOW_ACTION_TYPE_MARK: + ret = flow_dv_validate_action_mark(dev, act, + sub_action_flags, + attr, error); + if (ret < 0) + return ret; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) + sub_action_flags |= MLX5_FLOW_ACTION_MARK | + MLX5_FLOW_ACTION_MARK_EXT; + else + sub_action_flags |= MLX5_FLOW_ACTION_MARK; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_dv_validate_action_count(dev, error); + if (ret < 0) + return ret; + sub_action_flags |= MLX5_FLOW_ACTION_COUNT; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + ret = flow_dv_validate_action_port_id(dev, + sub_action_flags, + act, + attr, + error); + if (ret) + return ret; + sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + ret = flow_dv_validate_action_raw_encap_decap + (dev, NULL, act->conf, attr, &sub_action_flags, + &actions_n, error); + if (ret < 0) + return ret; + ++actions_n; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Doesn't support optional " + "action"); + } + } + if (attr->ingress && !attr->transfer) { + if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Ingress must has a dest " + "QUEUE for Sample"); + } else if (attr->egress && !attr->transfer) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Sample Only support Ingress " + "or E-Switch"); + } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) { + MLX5_ASSERT(attr->transfer); + if (sample->ratio > 1) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "E-Switch doesn't support " + "any optional action " + "for sampling"); + fdb_mirror = 1; + if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action QUEUE"); + if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "E-Switch must has a dest " + "port for mirroring"); + } + /* Continue validation for Xcap actions.*/ + if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && + (queue_index == 0xFFFF || + mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == + MLX5_FLOW_XCAP_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap and decap " + "combination aren't " + "supported"); + if (!attr->transfer && attr->ingress && (sub_action_flags & + MLX5_FLOW_ACTION_ENCAP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap is not supported" + " for ingress traffic"); + } + return 0; +} + +/** + * Find existing modify-header resource or create and register a new one. + * + * @param dev[in, out] + * Pointer to rte_eth_dev structure. + * @param[in, out] resource + * Pointer to modify-header resource. + * @parm[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. 
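+ *
+ * Sketch of the key construction performed below (layout assumed from
+ * union mlx5_flow_modify_hdr_key):
+ * @code
+ * hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0);
+ * resource->entry.key = hdr_mod_key.v64;
+ * @endcode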
+ */ +static int +flow_dv_modify_hdr_resource_register + (struct rte_eth_dev *dev, + struct mlx5_flow_dv_modify_hdr_resource *resource, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_flow_dv_modify_hdr_resource *cache_resource; + struct mlx5dv_dr_domain *ns; + uint32_t actions_len; + struct mlx5_hlist_entry *entry; + union mlx5_flow_modify_hdr_key hdr_mod_key = { + { + .ft_type = resource->ft_type, + .actions_num = resource->actions_num, + .group = dev_flow->dv.group, + .cksum = 0, + } + }; + int ret; + + resource->flags = dev_flow->dv.group ? 0 : + MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; + if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, + resource->flags)) + return rte_flow_error_set(error, EOVERFLOW, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "too many modify header items"); + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + ns = sh->fdb_domain; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) + ns = sh->tx_domain; + else + ns = sh->rx_domain; + /* Lookup a matching resource from cache. */ + actions_len = resource->actions_num * sizeof(resource->actions[0]); + hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0); + resource->entry.key = hdr_mod_key.v64; + entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key, + flow_dv_modify_hdr_resource_match, + (void *)resource); + if (entry) { + cache_resource = container_of(entry, + struct mlx5_flow_dv_modify_hdr_resource, + entry); + DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", + (void *)cache_resource, + __atomic_load_n(&cache_resource->refcnt, + __ATOMIC_RELAXED)); + __atomic_fetch_add(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED); + dev_flow->handle->dvh.modify_hdr = cache_resource; + return 0; + + } + /* Register new modify-header resource. */ + cache_resource = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(*cache_resource) + actions_len, 0, + SOCKET_ID_ANY); + if (!cache_resource) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + *cache_resource = *resource; + rte_memcpy(cache_resource->actions, resource->actions, actions_len); + ret = mlx5_flow_os_create_flow_action_modify_header + (sh->ctx, ns, cache_resource, + actions_len, &cache_resource->action); + if (ret) { + mlx5_free(cache_resource); + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + } + __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); + if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry, + flow_dv_modify_hdr_resource_match, + (void *)cache_resource)) { + claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); + mlx5_free(cache_resource); + return rte_flow_error_set(error, EEXIST, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "action exist"); + } + dev_flow->handle->dvh.modify_hdr = cache_resource; + DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", + (void *)cache_resource, + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); + return 0; +} + +/** + * Get DV flow counter by index. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] idx + * mlx5 flow counter index in the container. + * @param[out] ppool + * mlx5 flow counter pool in the container, + * + * @return + * Pointer to the counter, NULL otherwise. 
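+ *
+ * Index decoding sketch: counter indices are 1-based and may carry the
+ * shared bit, so the pool is located with
+ * @code
+ * idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
+ * pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
+ * @endcode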
+ */ +static struct mlx5_flow_counter * +flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, + uint32_t idx, + struct mlx5_flow_counter_pool **ppool) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + struct mlx5_flow_counter_pool *pool; + + /* Decrease to original index and clear shared bit. */ + idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); + MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n); + pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; + MLX5_ASSERT(pool); + if (ppool) + *ppool = pool; + return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); +} + +/** + * Check the devx counter belongs to the pool. + * + * @param[in] pool + * Pointer to the counter pool. + * @param[in] id + * The counter devx ID. + * + * @return + * True if counter belongs to the pool, false otherwise. + */ +static bool +flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) +{ + int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * + MLX5_COUNTERS_PER_POOL; if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) return true; @@ -4109,8 +4656,8 @@ flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) /** * Get a pool by devx counter ID. * - * @param[in] cont - * Pointer to the counter container. + * @param[in] cmng + * Pointer to the counter management. * @param[in] id * The counter devx ID. * @@ -4118,107 +4665,38 @@ flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) * The counter pool pointer if exists, NULL otherwise, */ static struct mlx5_flow_counter_pool * -flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id) +flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id) { uint32_t i; + struct mlx5_flow_counter_pool *pool = NULL; + rte_spinlock_lock(&cmng->pool_update_sl); /* Check last used pool. */ - if (cont->last_pool_idx != POOL_IDX_INVALID && - flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id)) - return cont->pools[cont->last_pool_idx]; + if (cmng->last_pool_idx != POOL_IDX_INVALID && + flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) { + pool = cmng->pools[cmng->last_pool_idx]; + goto out; + } /* ID out of range means no suitable pool in the container. */ - if (id > cont->max_id || id < cont->min_id) - return NULL; + if (id > cmng->max_id || id < cmng->min_id) + goto out; /* * Find the pool from the end of the container, since mostly counter * ID is sequence increasing, and the last pool should be the needed * one. */ - i = rte_atomic16_read(&cont->n_valid); + i = cmng->n_valid; while (i--) { - struct mlx5_flow_counter_pool *pool = cont->pools[i]; - - if (flow_dv_is_counter_in_pool(pool, id)) - return pool; - } - return NULL; -} + struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i]; -/** - * Allocate a new memory for the counter values wrapped by all the needed - * management. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] raws_n - * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters. - * - * @return - * The new memory management pointer on success, otherwise NULL and rte_errno - * is set. 
- */ -static struct mlx5_counter_stats_mem_mng * -flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_devx_mkey_attr mkey_attr; - struct mlx5_counter_stats_mem_mng *mem_mng; - volatile struct flow_counter_stats *raw_data; - int size = (sizeof(struct flow_counter_stats) * - MLX5_COUNTERS_PER_POOL + - sizeof(struct mlx5_counter_stats_raw)) * raws_n + - sizeof(struct mlx5_counter_stats_mem_mng); - size_t pgsize = rte_mem_page_size(); - if (pgsize == (size_t)-1) { - DRV_LOG(ERR, "Failed to get mem page size"); - rte_errno = ENOMEM; - return NULL; - } - uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, - SOCKET_ID_ANY); - int i; - - if (!mem) { - rte_errno = ENOMEM; - return NULL; - } - mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; - size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, - IBV_ACCESS_LOCAL_WRITE); - if (!mem_mng->umem) { - rte_errno = errno; - mlx5_free(mem); - return NULL; - } - mkey_attr.addr = (uintptr_t)mem; - mkey_attr.size = size; - mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); - mkey_attr.pd = sh->pdn; - mkey_attr.log_entity_size = 0; - mkey_attr.pg_access = 0; - mkey_attr.klm_array = NULL; - mkey_attr.klm_num = 0; - if (priv->config.hca_attr.relaxed_ordering_write && - priv->config.hca_attr.relaxed_ordering_read && - !haswell_broadwell_cpu) - mkey_attr.relaxed_ordering = 1; - mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); - if (!mem_mng->dm) { - mlx5_glue->devx_umem_dereg(mem_mng->umem); - rte_errno = errno; - mlx5_free(mem); - return NULL; - } - mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); - raw_data = (volatile struct flow_counter_stats *)mem; - for (i = 0; i < raws_n; ++i) { - mem_mng->raws[i].mem_mng = mem_mng; - mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; + if (flow_dv_is_counter_in_pool(pool_tmp, id)) { + pool = pool_tmp; + break; + } } - LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); - return mem_mng; +out: + rte_spinlock_unlock(&cmng->pool_update_sl); + return pool; } /** @@ -4226,24 +4704,17 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. - * @param[in] age - * Whether the pool is for Aging counter. * * @return * 0 on success, otherwise negative errno value and rte_errno is set. 
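 *
 * The pool pointer array is grown by MLX5_CNT_CONTAINER_RESIZE entries
 * and the old array is copied over; callers are expected to hold
 * cmng->pool_update_sl, as flow_dv_pool_create() below does.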
*/ static int -flow_dv_container_resize(struct rte_eth_dev *dev, - uint32_t batch, uint32_t age) +flow_dv_container_resize(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, - age); - struct mlx5_counter_stats_mem_mng *mem_mng = NULL; - void *old_pools = cont->pools; - uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + void *old_pools = cmng->pools; + uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE; uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); @@ -4252,32 +4723,10 @@ flow_dv_container_resize(struct rte_eth_dev *dev, return -ENOMEM; } if (old_pools) - memcpy(pools, old_pools, cont->n * + memcpy(pools, old_pools, cmng->n * sizeof(struct mlx5_flow_counter_pool *)); - /* - * Fallback mode query the counter directly, no background query - * resources are needed. - */ - if (!priv->counter_fallback) { - int i; - - mem_mng = flow_dv_create_counter_stat_mem_mng(dev, - MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); - if (!mem_mng) { - mlx5_free(pools); - return -ENOMEM; - } - for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) - LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws, - mem_mng->raws + - MLX5_CNT_CONTAINER_RESIZE + - i, next); - } - rte_spinlock_lock(&cont->resize_sl); - cont->n = resize; - cont->mem_mng = mem_mng; - cont->pools = pools; - rte_spinlock_unlock(&cont->resize_sl); + cmng->n = resize; + cmng->pools = pools; if (old_pools) mlx5_free(old_pools); return 0; @@ -4305,25 +4754,15 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; struct mlx5_flow_counter *cnt; - struct mlx5_flow_counter_ext *cnt_ext = NULL; int offset; cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); MLX5_ASSERT(pool); - if (counter < MLX5_CNT_BATCH_OFFSET) { - cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); - if (priv->counter_fallback) - return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0, + if (priv->sh->cmng.counter_fallback) + return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0, 0, pkts, bytes, 0, NULL, NULL, 0); - } - rte_spinlock_lock(&pool->sl); - /* - * The single counters allocation may allocate smaller ID than the - * current allocated in parallel to the host reading. - * In this case the new counter values must be reported as 0. - */ - if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) { + if (!pool->raw) { *pkts = 0; *bytes = 0; } else { @@ -4342,8 +4781,6 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, * Pointer to the Ethernet device structure. * @param[out] dcs * The devX counter handle. - * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. * @param[in] age * Whether the pool is for counter that was allocated for aging. 
* @param[in/out] cont_cur @@ -4354,115 +4791,52 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, */ static struct mlx5_flow_counter_pool * flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, - uint32_t batch, uint32_t age) + uint32_t age) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool; - struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, - age); - int16_t n_valid = rte_atomic16_read(&cont->n_valid); + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + bool fallback = priv->sh->cmng.counter_fallback; uint32_t size = sizeof(*pool); - if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age)) - return NULL; - size += MLX5_COUNTERS_PER_POOL * CNT_SIZE; - size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE); - size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE); + size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE; + size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE); pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); if (!pool) { rte_errno = ENOMEM; return NULL; } - pool->min_dcs = dcs; - if (!priv->counter_fallback) - pool->raw = cont->mem_mng->raws + n_valid % - MLX5_CNT_CONTAINER_RESIZE; - pool->raw_hw = NULL; - pool->type = 0; - pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT); - pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE); + pool->raw = NULL; + pool->is_aged = !!age; pool->query_gen = 0; + pool->min_dcs = dcs; rte_spinlock_init(&pool->sl); + rte_spinlock_init(&pool->csl); TAILQ_INIT(&pool->counters[0]); TAILQ_INIT(&pool->counters[1]); - TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); - pool->index = n_valid; - cont->pools[n_valid] = pool; - if (!batch) { + pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; + rte_spinlock_lock(&cmng->pool_update_sl); + pool->index = cmng->n_valid; + if (pool->index == cmng->n && flow_dv_container_resize(dev)) { + mlx5_free(pool); + rte_spinlock_unlock(&cmng->pool_update_sl); + return NULL; + } + cmng->pools[pool->index] = pool; + cmng->n_valid++; + if (unlikely(fallback)) { int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL); - if (base < cont->min_id) - cont->min_id = base; - if (base > cont->max_id) - cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1; - cont->last_pool_idx = pool->index; + if (base < cmng->min_id) + cmng->min_id = base; + if (base > cmng->max_id) + cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1; + cmng->last_pool_idx = pool->index; } - /* Pool initialization must be updated before host thread access. */ - rte_cio_wmb(); - rte_atomic16_add(&cont->n_valid, 1); + rte_spinlock_unlock(&cmng->pool_update_sl); return pool; } -/** - * Restore skipped counters in the pool. - * - * As counter pool query requires the first counter dcs - * ID start with 4 alinged, if the pool counters with - * min_dcs ID are not aligned with 4, the counters will - * be skipped. - * Once other min_dcs ID less than these skipped counter - * dcs ID appears, the skipped counters will be safe to - * use. - * Should be called when min_dcs is updated. - * - * @param[in] pool - * Current counter pool. - * @param[in] last_min_dcs - * Last min_dcs. - */ -static void -flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool, - struct mlx5_devx_obj *last_min_dcs) -{ - struct mlx5_flow_counter_ext *cnt_ext; - uint32_t offset, new_offset; - uint32_t skip_cnt = 0; - uint32_t i; - - if (!pool->skip_cnt) - return; - /* - * If last min_dcs is not valid. 
The skipped counter may even after - * last min_dcs, set the offset to the whole pool. - */ - if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) - offset = MLX5_COUNTERS_PER_POOL; - else - offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL; - new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL; - /* - * Check the counters from 1 to the last_min_dcs range. Counters - * before new min_dcs indicates pool still has skipped counters. - * Counters be skipped after new min_dcs will be ready to use. - * Offset 0 counter must be empty or min_dcs, start from 1. - */ - for (i = 1; i < offset; i++) { - cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i); - if (cnt_ext->skipped) { - if (i > new_offset) { - cnt_ext->skipped = 0; - TAILQ_INSERT_TAIL - (&pool->counters[pool->query_gen], - MLX5_POOL_GET_CNT(pool, i), next); - } else { - skip_cnt++; - } - } - } - if (!skip_cnt) - pool->skip_cnt = 0; -} - /** * Prepare a new counter and/or a new counter pool. * @@ -4470,8 +4844,6 @@ flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool, * Pointer to the Ethernet device structure. * @param[out] cnt_free * Where to put the pointer of a new counter. - * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. * @param[in] age * Whether the pool is for counter that was allocated for aging. * @@ -4482,101 +4854,45 @@ flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool, static struct mlx5_flow_counter_pool * flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, struct mlx5_flow_counter **cnt_free, - uint32_t batch, uint32_t age) + uint32_t age) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_pools_container *cont; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; struct mlx5_flow_counter_pool *pool; struct mlx5_counters tmp_tq; - struct mlx5_devx_obj *last_min_dcs; struct mlx5_devx_obj *dcs = NULL; struct mlx5_flow_counter *cnt; - uint32_t add2other; + enum mlx5_counter_type cnt_type = + age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN; + bool fallback = priv->sh->cmng.counter_fallback; uint32_t i; - cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); - if (!batch) { -retry: - add2other = 0; + if (fallback) { /* bulk_bitmap must be 0 for single counter allocation. */ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); if (!dcs) return NULL; - pool = flow_dv_find_pool_by_id(cont, dcs->id); - /* Check if counter belongs to exist pool ID range. */ + pool = flow_dv_find_pool_by_id(cmng, dcs->id); if (!pool) { - pool = flow_dv_find_pool_by_id - (MLX5_CNT_CONTAINER - (priv->sh, batch, (age ^ 0x1)), dcs->id); - /* - * Pool eixsts, counter will be added to the other - * container, need to reallocate it later. - */ - if (pool) { - add2other = 1; - } else { - pool = flow_dv_pool_create(dev, dcs, batch, - age); - if (!pool) { - mlx5_devx_cmd_destroy(dcs); - return NULL; - } + pool = flow_dv_pool_create(dev, dcs, age); + if (!pool) { + mlx5_devx_cmd_destroy(dcs); + return NULL; } } - if ((dcs->id < pool->min_dcs->id || - pool->min_dcs->id & - (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) && - !(dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) { - /* - * Update the pool min_dcs only if current dcs is - * valid and exist min_dcs is not valid or greater - * than new dcs. - */ - last_min_dcs = pool->min_dcs; - rte_atomic64_set(&pool->a64_dcs, - (int64_t)(uintptr_t)dcs); - /* - * Restore any skipped counters if the new min_dcs - * ID is smaller or min_dcs is not valid. 
- */ - if (dcs->id < last_min_dcs->id || - last_min_dcs->id & - (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) - flow_dv_counter_restore(pool, last_min_dcs); - } i = dcs->id % MLX5_COUNTERS_PER_POOL; cnt = MLX5_POOL_GET_CNT(pool, i); cnt->pool = pool; - MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs; - /* - * If min_dcs is not valid, it means the new allocated dcs - * also fail to become the valid min_dcs, just skip it. - * Or if min_dcs is valid, and new dcs ID is smaller than - * min_dcs, but not become the min_dcs, also skip it. - */ - if (pool->min_dcs->id & - (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) || - dcs->id < pool->min_dcs->id) { - MLX5_GET_POOL_CNT_EXT(pool, i)->skipped = 1; - pool->skip_cnt = 1; - goto retry; - } - if (add2other) { - TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], - cnt, next); - goto retry; - } + cnt->dcs_when_free = dcs; *cnt_free = cnt; return pool; } - /* bulk_bitmap is in 128 counters units. */ - if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) - dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); + dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); if (!dcs) { rte_errno = ENODATA; return NULL; } - pool = flow_dv_pool_create(dev, dcs, batch, age); + pool = flow_dv_pool_create(dev, dcs, age); if (!pool) { mlx5_devx_cmd_destroy(dcs); return NULL; @@ -4587,57 +4903,19 @@ retry: cnt->pool = pool; TAILQ_INSERT_HEAD(&tmp_tq, cnt, next); } - rte_spinlock_lock(&cont->csl); - TAILQ_CONCAT(&cont->counters, &tmp_tq, next); - rte_spinlock_unlock(&cont->csl); + rte_spinlock_lock(&cmng->csl[cnt_type]); + TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next); + rte_spinlock_unlock(&cmng->csl[cnt_type]); *cnt_free = MLX5_POOL_GET_CNT(pool, 0); (*cnt_free)->pool = pool; return pool; } -/** - * Search for existed shared counter. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] id - * The shared counter ID to search. - * @param[out] ppool - * mlx5 flow counter pool in the container, - * - * @return - * NULL if not existed, otherwise pointer to the shared extend counter. - */ -static struct mlx5_flow_counter_ext * -flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id, - struct mlx5_flow_counter_pool **ppool) -{ - struct mlx5_priv *priv = dev->data->dev_private; - union mlx5_l3t_data data; - uint32_t cnt_idx; - - if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword) - return NULL; - cnt_idx = data.dword; - /* - * Shared counters don't have age info. The counter extend is after - * the counter datat structure. - */ - return (struct mlx5_flow_counter_ext *) - ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1); -} - /** * Allocate a flow counter. * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] shared - * Indicate if this counter is shared with other flows. - * @param[in] id - * Counter identifier. - * @param[in] group - * Counter flow group. * @param[in] age * Whether the counter was allocated for aging. * @@ -4645,68 +4923,44 @@ flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id, * Index to flow counter on success, 0 otherwise and rte_errno is set. 
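 *
 * For reference, the returned index is encoded as
 * @code
 * cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
 *				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
 * @endcode
 * and shared users additionally OR in MLX5_CNT_SHARED_OFFSET (see
 * flow_dv_counter_alloc_shared_cb() below).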
*/ static uint32_t -flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, - uint16_t group, uint32_t age) +flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; struct mlx5_flow_counter *cnt_free = NULL; - struct mlx5_flow_counter_ext *cnt_ext = NULL; - /* - * Currently group 0 flow counter cannot be assigned to a flow if it is - * not the first one in the batch counter allocation, so it is better - * to allocate counters one by one for these flows in a separate - * container. - * A counter can be shared between different groups so need to take - * shared counters from the single container. - */ - uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0; - struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, - age); + bool fallback = priv->sh->cmng.counter_fallback; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + enum mlx5_counter_type cnt_type = + age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN; uint32_t cnt_idx; if (!priv->config.devx) { rte_errno = ENOTSUP; return 0; } - if (shared) { - cnt_ext = flow_dv_counter_shared_search(dev, id, &pool); - if (cnt_ext) { - if (cnt_ext->ref_cnt + 1 == 0) { - rte_errno = E2BIG; - return 0; - } - cnt_ext->ref_cnt++; - cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL + - (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL) - + 1; - return cnt_idx; - } - } /* Get free counters from container. */ - rte_spinlock_lock(&cont->csl); - cnt_free = TAILQ_FIRST(&cont->counters); + rte_spinlock_lock(&cmng->csl[cnt_type]); + cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]); if (cnt_free) - TAILQ_REMOVE(&cont->counters, cnt_free, next); - rte_spinlock_unlock(&cont->csl); - if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, - batch, age)) + TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next); + rte_spinlock_unlock(&cmng->csl[cnt_type]); + if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age)) goto err; pool = cnt_free->pool; - if (!batch) - cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free); + if (fallback) + cnt_free->dcs_when_active = cnt_free->dcs_when_free; /* Create a DV counter action only in the first time usage. */ if (!cnt_free->action) { uint16_t offset; struct mlx5_devx_obj *dcs; int ret; - if (batch) { + if (!fallback) { offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free); dcs = pool->min_dcs; } else { offset = 0; - dcs = cnt_ext->dcs; + dcs = cnt_free->dcs_when_free; } ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset, &cnt_free->action); @@ -4717,38 +4971,80 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, } cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, MLX5_CNT_ARRAY_IDX(pool, cnt_free)); - cnt_idx += batch * MLX5_CNT_BATCH_OFFSET; - cnt_idx += age * MLX5_CNT_AGE_OFFSET; /* Update the counter reset values. */ if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits, &cnt_free->bytes)) goto err; - if (cnt_ext) { - cnt_ext->shared = shared; - cnt_ext->ref_cnt = 1; - cnt_ext->id = id; - if (shared) { - union mlx5_l3t_data data; - - data.dword = cnt_idx; - if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data)) - return 0; - } - } - if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on) + if (!fallback && !priv->sh->cmng.query_thread_on) /* Start the asynchronous batch query by the host thread. 
*/ mlx5_set_query_alarm(priv->sh); return cnt_idx; err: if (cnt_free) { cnt_free->pool = pool; - rte_spinlock_lock(&cont->csl); - TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next); - rte_spinlock_unlock(&cont->csl); + if (fallback) + cnt_free->dcs_when_free = cnt_free->dcs_when_active; + rte_spinlock_lock(&cmng->csl[cnt_type]); + TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next); + rte_spinlock_unlock(&cmng->csl[cnt_type]); } return 0; } +/** + * Allocate a shared flow counter. + * + * @param[in] ctx + * Pointer to the shared counter configuration. + * @param[in] data + * Pointer to save the allocated counter index. + * + * @return + * Index to flow counter on success, 0 otherwise and rte_errno is set. + */ + +static int32_t +flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data) +{ + struct mlx5_shared_counter_conf *conf = ctx; + struct rte_eth_dev *dev = conf->dev; + struct mlx5_flow_counter *cnt; + + data->dword = flow_dv_counter_alloc(dev, 0); + data->dword |= MLX5_CNT_SHARED_OFFSET; + cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL); + cnt->shared_info.id = conf->id; + return 0; +} + +/** + * Get a shared flow counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] id + * Counter identifier. + * + * @return + * Index to flow counter on success, 0 otherwise and rte_errno is set. + */ +static uint32_t +flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_counter_conf conf = { + .dev = dev, + .id = id, + }; + union mlx5_l3t_data data = { + .dword = 0, + }; + + mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data, + flow_dv_counter_alloc_shared_cb, &conf); + return data.dword; +} + /** * Get age param from counter index. * @@ -4790,13 +5086,13 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, struct mlx5_age_info *age_info; struct mlx5_age_param *age_param; struct mlx5_priv *priv = dev->data->dev_private; + uint16_t expected = AGE_CANDIDATE; age_info = GET_PORT_AGE_INFO(priv); age_param = flow_dv_counter_idx_get_age(dev, counter); - if (rte_atomic16_cmpset((volatile uint16_t *) - &age_param->state, - AGE_CANDIDATE, AGE_FREE) - != AGE_CANDIDATE) { + if (!__atomic_compare_exchange_n(&age_param->state, &expected, + AGE_FREE, false, __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { /** * We need the lock even it is age timeout, * since counter may still in process. @@ -4804,9 +5100,10 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, rte_spinlock_lock(&age_info->aged_sl); TAILQ_REMOVE(&age_info->aged_counters, cnt, next); rte_spinlock_unlock(&age_info->aged_sl); + __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED); } - rte_atomic16_set(&age_param->state, AGE_FREE); } + /** * Release a flow counter. 
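 *
 * Pairs with flow_dv_counter_alloc(); an illustrative sketch of the
 * assumed usage (not part of this patch):
 * @code
 * uint32_t cnt = flow_dv_counter_alloc(dev, 0);
 *
 * if (cnt)
 *	flow_dv_counter_release(dev, cnt);
 * @endcode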
* @@ -4821,23 +5118,16 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; struct mlx5_flow_counter *cnt; - struct mlx5_flow_counter_ext *cnt_ext = NULL; + enum mlx5_counter_type cnt_type; if (!counter) return; cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); MLX5_ASSERT(pool); - if (counter < MLX5_CNT_BATCH_OFFSET) { - cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); - if (cnt_ext) { - if (--cnt_ext->ref_cnt) - return; - if (cnt_ext->shared) - mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, - cnt_ext->id); - } - } - if (IS_AGE_POOL(pool)) + if (IS_SHARED_CNT(counter) && + mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id)) + return; + if (pool->is_aged) flow_dv_counter_remove_from_age(dev, counter, cnt); cnt->pool = pool; /* @@ -4850,12 +5140,19 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) * function both operate with the different list. * */ - if (!priv->counter_fallback) + if (!priv->sh->cmng.counter_fallback) { + rte_spinlock_lock(&pool->csl); TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next); - else - TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER - (priv->sh, 0, 0))->counters), + rte_spinlock_unlock(&pool->csl); + } else { + cnt->dcs_when_free = cnt->dcs_when_active; + cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE : + MLX5_COUNTER_TYPE_ORIGIN; + rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]); + TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type], cnt, next); + rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]); + } } /** @@ -4878,8 +5175,9 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) */ static int flow_dv_validate_attributes(struct rte_eth_dev *dev, + const struct mlx5_flow_tunnel *tunnel, const struct rte_flow_attr *attributes, - bool external __rte_unused, + struct flow_grp_info grp_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -4887,6 +5185,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, int ret = 0; #ifndef HAVE_MLX5DV_DR + RTE_SET_USED(tunnel); + RTE_SET_USED(grp_info); if (attributes->group) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -4895,9 +5195,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, #else uint32_t table = 0; - ret = mlx5_flow_group_to_table(attributes, external, - attributes->group, !!priv->fdb_def_rule, - &table, error); + ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table, + grp_info, error); if (ret) return ret; if (!table) @@ -4980,15 +5279,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, .dst_port = RTE_BE16(UINT16_MAX), } }; - const struct rte_flow_item_ipv4 nic_ipv4_mask = { - .hdr = { - .src_addr = RTE_BE32(0xffffffff), - .dst_addr = RTE_BE32(0xffffffff), - .type_of_service = 0xff, - .next_proto_id = 0xff, - .time_to_live = 0xff, - }, - }; const struct rte_flow_item_ipv6 nic_ipv6_mask = { .hdr = { .src_addr = @@ -5001,6 +5291,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, .proto = 0xff, .hop_limits = 0xff, }, + .has_frag_ext = 1, }; const struct rte_flow_item_ecpri nic_ecpri_mask = { .hdr = { @@ -5019,10 +5310,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item_vlan *vlan_m = NULL; int16_t rw_act_num = 0; uint64_t is_root; + const struct mlx5_flow_tunnel *tunnel; + struct flow_grp_info grp_info = { + .external = !!external, + .transfer = 
!!attr->transfer, + .fdb_def_rule = !!priv->fdb_def_rule, + }; + const struct rte_eth_hairpin_conf *conf; if (items == NULL) return -1; - ret = flow_dv_validate_attributes(dev, attr, external, error); + if (is_flow_tunnel_match_rule(dev, attr, items, actions)) { + tunnel = flow_items_to_tunnel(items); + action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH | + MLX5_FLOW_ACTION_DECAP; + } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) { + tunnel = flow_actions_to_tunnel(actions); + action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; + } else { + tunnel = NULL; + } + grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate + (dev, tunnel, attr, items, actions); + ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error); if (ret < 0) return ret; is_root = (uint64_t)ret; @@ -5035,6 +5345,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "item not supported"); switch (type) { + case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL: + if (items[0].type != (typeof(items[0].type)) + MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "MLX5 private items " + "must be the first"); + break; case RTE_FLOW_ITEM_TYPE_VOID: break; case RTE_FLOW_ITEM_TYPE_PORT_ID: @@ -5046,7 +5365,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ITEM_TYPE_ETH: ret = mlx5_flow_validate_item_eth(items, item_flags, - error); + true, error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : @@ -5088,11 +5407,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ITEM_TYPE_IPV4: mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); - ret = mlx5_flow_validate_item_ipv4(items, item_flags, - last_item, - ether_type, - &nic_ipv4_mask, - error); + ret = flow_dv_validate_item_ipv4(items, item_flags, + last_item, ether_type, + error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : @@ -5140,6 +5457,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, next_protocol = 0xff; } break; + case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: + ret = flow_dv_validate_item_ipv6_frag_ext(items, + item_flags, + error); + if (ret < 0) + return ret; + last_item = tunnel ? + MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : + MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6_frag_ext *) + items->mask)->hdr.next_header) { + next_protocol = + ((const struct rte_flow_item_ipv6_frag_ext *) + items->spec)->hdr.next_header; + next_protocol &= + ((const struct rte_flow_item_ipv6_frag_ext *) + items->mask)->hdr.next_header; + } else { + /* Reset for inner layer. 
 */
+				next_protocol = 0xff;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			ret = mlx5_flow_validate_item_tcp
 						(items, item_flags,
@@ -5599,7 +5939,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			rw_act_num += MLX5_ACT_NUM_MDF_TTL;
 			break;
 		case RTE_FLOW_ACTION_TYPE_JUMP:
-			ret = flow_dv_validate_action_jump(actions,
+			ret = flow_dv_validate_action_jump(dev, tunnel, actions,
 							   action_flags,
 							   attr, external,
 							   error);
@@ -5699,6 +6039,26 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
 			break;
+		case RTE_FLOW_ACTION_TYPE_SAMPLE:
+			ret = flow_dv_validate_action_sample(action_flags,
+							     actions, dev,
+							     attr, error);
+			if (ret < 0)
+				return ret;
+			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+			++actions_n;
+			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+			if (actions[0].type != (typeof(actions[0].type))
+				MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
+				return rte_flow_error_set
+						(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL, "MLX5 private action "
+						"must be the first");
+
+			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -5706,6 +6066,54 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 						  "action not supported");
 		}
 	}
+	/*
+	 * Validate actions in flow rules:
+	 * - Explicit decap action is prohibited by the tunnel offload API.
+	 * - Drop action in tunnel steer rule is prohibited by the API.
+	 * - Application cannot use MARK action because its value can mask
+	 *   the tunnel default miss notification.
+	 * - JUMP in tunnel match rule is not supported by the current PMD
+	 *   implementation.
+	 * - TAG & META are reserved for future uses.
+	 */
+	if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
+		uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP |
+					    MLX5_FLOW_ACTION_MARK |
+					    MLX5_FLOW_ACTION_SET_TAG |
+					    MLX5_FLOW_ACTION_SET_META |
+					    MLX5_FLOW_ACTION_DROP;
+
+		if (action_flags & bad_actions_mask)
+			return rte_flow_error_set
+					(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"Invalid RTE action in tunnel "
+					"set decap rule");
+		if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
+			return rte_flow_error_set
+					(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"tunnel set decap rule must terminate "
+					"with JUMP");
+		if (!attr->ingress)
+			return rte_flow_error_set
+					(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"tunnel flows for ingress traffic only");
+	}
+	if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
+		uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP |
+					    MLX5_FLOW_ACTION_MARK |
+					    MLX5_FLOW_ACTION_SET_TAG |
+					    MLX5_FLOW_ACTION_SET_META;
+
+		if (action_flags & bad_actions_mask)
+			return rte_flow_error_set
+					(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"Invalid RTE action in tunnel "
+					"set match rule");
+	}
 	/*
 	 * Validate the drop action mutual exclusion with other actions.
 	 * Drop action is mutually-exclusive with any other action, except for
@@ -5754,11 +6162,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 					  actions,
 					  "no fate action is found");
 	}
-	/* Continue validation for Xcap and VLAN actions.*/
+	/*
+	 * Continue validation for Xcap and VLAN actions.
+	 * If hairpin is working in explicit TX rule mode, there is no action
+	 * splitting and the validation of hairpin ingress flow should be the
+	 * same as other standard flows.
+ */ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS | MLX5_FLOW_VLAN_ACTIONS)) && (queue_index == 0xFFFF || - mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN || + ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL && + conf->tx_explicit != 0))) { if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, @@ -5787,7 +6202,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, "multiple VLAN actions"); } } - /* Hairpin flow will add one more TAG action. */ + /* + * Hairpin flow will add one more TAG action in TX implicit mode. + * In TX explicit mode, there will be no hairpin flow ID. + */ if (hairpin > 0) rw_act_num += MLX5_ACT_NUM_SET_TAG; /* extra metadata enabled: one more TAG action will be add. */ @@ -5835,9 +6253,11 @@ flow_dv_prepare(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); /* In case of corrupting the memory. */ - if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not free temporary device flow"); @@ -5851,8 +6271,8 @@ flow_dv_prepare(struct rte_eth_dev *dev, "not enough memory to create flow handle"); return NULL; } - /* No multi-thread supporting. */ - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; /* @@ -5959,9 +6379,10 @@ flow_dv_translate_item_eth(void *matcher, void *key, .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", .type = RTE_BE16(0xffff), + .has_vlan = 0, }; - void *headers_m; - void *headers_v; + void *hdrs_m; + void *hdrs_v; char *l24_v; unsigned int i; @@ -5970,38 +6391,26 @@ flow_dv_translate_item_eth(void *matcher, void *key, if (!eth_m) eth_m = &nic_mask; if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); } - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16), + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16), ð_m->dst, sizeof(eth_m->dst)); /* The value must be in the range of the mask. */ - l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16); for (i = 0; i < sizeof(eth_m->dst); ++i) l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i]; - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16), + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16), ð_m->src, sizeof(eth_m->src)); - l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16); /* The value must be in the range of the mask. 
 */
 	for (i = 0; i < sizeof(eth_m->dst); ++i)
 		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
-	if (eth_v->type) {
-		/* When ethertype is present set mask for tagged VLAN. */
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
-		/* Set value for tagged VLAN if ethertype is 802.1Q. */
-		if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
-		    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
-				 1);
-			/* Return here to avoid setting match on ethertype. */
-			return;
-		}
-	}
 	/*
 	 * HW supports match on one Ethertype, the Ethertype following the last
 	 * VLAN tag of the packet (see PRM).
@@ -6010,19 +6419,42 @@ flow_dv_translate_item_eth(void *matcher, void *key,
 	 * ethertype, and use ip_version field instead.
 	 * eCPRI over Ether layer will use type value 0xAEFE.
 	 */
-	if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
-	    eth_m->type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
-	} else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
-		   eth_m->type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
-	} else {
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
-			 rte_be_to_cpu_16(eth_m->type));
-		l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				     ethertype);
-		*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+	if (eth_m->type == 0xFFFF) {
+		/* Set cvlan_tag mask for any single/multi/un-tagged case. */
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		switch (eth_v->type) {
+		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_QINQ):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+			return;
+		default:
+			break;
+		}
+	}
+	if (eth_m->has_vlan) {
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		if (eth_v->has_vlan) {
+			/*
+			 * Here, when the has_more_vlan field in the VLAN item
+			 * is also not set, only single-tagged packets will be
+			 * matched.
+			 */
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+			return;
+		}
 	}
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+		 rte_be_to_cpu_16(eth_m->type));
+	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
+	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
 }
 
 /**
@@ -6047,19 +6479,19 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
 {
 	const struct rte_flow_item_vlan *vlan_m = item->mask;
 	const struct rte_flow_item_vlan *vlan_v = item->spec;
-	void *headers_m;
-	void *headers_v;
+	void *hdrs_m;
+	void *hdrs_v;
 	uint16_t tci_m;
 	uint16_t tci_v;
 
 	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
 					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
 	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
 					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 		/*
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
@@ -6072,37 +6504,54 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * When VLAN item exists in flow, mark packet as tagged, * even if TCI is not specified. */ - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); + if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) { + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1); + } if (!vlan_v) return; if (!vlan_m) vlan_m = &rte_flow_item_vlan_mask; tci_m = rte_be_to_cpu_16(vlan_m->tci); tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13); /* * HW is optimized for IPv4/IPv6. In such cases, avoid setting * ethertype, and use ip_version field instead. */ - if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && - vlan_m->inner_type == 0xFFFF) { - flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); - } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && - vlan_m->inner_type == 0xFFFF) { - flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); - } else { - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, - rte_be_to_cpu_16(vlan_m->inner_type)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - rte_be_to_cpu_16(vlan_m->inner_type & - vlan_v->inner_type)); + if (vlan_m->inner_type == 0xFFFF) { + switch (vlan_v->inner_type) { + case RTE_BE16(RTE_ETHER_TYPE_VLAN): + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0); + return; + case RTE_BE16(RTE_ETHER_TYPE_IPV4): + flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4); + return; + case RTE_BE16(RTE_ETHER_TYPE_IPV6): + flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6); + return; + default: + break; + } + } + if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) { + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); + /* Only one vlan_tag bit can be set. */ + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0); + return; } + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type)); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type)); } /** @@ -6114,8 +6563,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] item_flags - * Bit-fields that holds the items detected until now. * @param[in] inner * Item is inner pattern. 
* @param[in] group @@ -6124,7 +6571,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, static void flow_dv_translate_item_ipv4(void *matcher, void *key, const struct rte_flow_item *item, - const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv4 *ipv4_m = item->mask; @@ -6154,13 +6600,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); } flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); - /* - * On outer header (which must contains L2), or inner header with L2, - * set cvlan_tag mask bit to mark this packet as untagged. - * This should be done even if item->spec is empty. - */ - if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv4_v) return; if (!ipv4_m) @@ -6192,6 +6631,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, ipv4_m->hdr.time_to_live); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, + !!(ipv4_m->hdr.fragment_offset)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, + !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset)); } /** @@ -6203,8 +6646,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] item_flags - * Bit-fields that holds the items detected until now. * @param[in] inner * Item is inner pattern. * @param[in] group @@ -6213,7 +6654,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, static void flow_dv_translate_item_ipv6(void *matcher, void *key, const struct rte_flow_item *item, - const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv6 *ipv6_m = item->mask; @@ -6252,13 +6692,6 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); } flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); - /* - * On outer header (which must contains L2), or inner header with L2, - * set cvlan_tag mask bit to mark this packet as untagged. - * This should be done even if item->spec is empty. - */ - if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv6_v) return; if (!ipv6_m) @@ -6307,6 +6740,61 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, ipv6_m->hdr.hop_limits); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, + !!(ipv6_m->has_frag_ext)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, + !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext)); +} + +/** + * Add IPV6 fragment extension item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. 
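+ *
+ * An illustrative rte_flow pattern that exercises this translation
+ * (hypothetical application code; field values are examples only):
+ * @code
+ * struct rte_flow_item_ipv6_frag_ext frag_spec = {
+ *	.hdr = { .next_header = IPPROTO_UDP },
+ * };
+ * struct rte_flow_item pattern[] = {
+ *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
+ *	{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT, .spec = &frag_spec },
+ *	{ .type = RTE_FLOW_ITEM_TYPE_END },
+ * };
+ * @endcode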
+ */ +static void +flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask; + const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec; + const struct rte_flow_item_ipv6_frag_ext nic_mask = { + .hdr = { + .next_header = 0xff, + .frag_data = RTE_BE16(0xffff), + }, + }; + void *headers_m; + void *headers_v; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + /* IPv6 fragment extension item exists, so packet is IP fragment. */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1); + if (!ipv6_frag_ext_v) + return; + if (!ipv6_frag_ext_m) + ipv6_frag_ext_m = &nic_mask; + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, + ipv6_frag_ext_m->hdr.next_header); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + ipv6_frag_ext_v->hdr.next_header & + ipv6_frag_ext_m->hdr.next_header); } /** @@ -7241,12 +7729,6 @@ flow_dv_translate_item_icmp6(void *matcher, void *key, return; if (!icmp6_m) icmp6_m = &rte_flow_item_icmp6_mask; - /* - * Force flow only to match the non-fragmented IPv6 ICMPv6 packets. - * If only the protocol is specified, no need to match the frag. - */ - MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type); MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type, icmp6_v->type & icmp6_m->type); @@ -7274,6 +7756,8 @@ flow_dv_translate_item_icmp(void *matcher, void *key, { const struct rte_flow_item_icmp *icmp_m = item->mask; const struct rte_flow_item_icmp *icmp_v = item->spec; + uint32_t icmp_header_data_m = 0; + uint32_t icmp_header_data_v = 0; void *headers_m; void *headers_v; void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, @@ -7294,12 +7778,6 @@ flow_dv_translate_item_icmp(void *matcher, void *key, return; if (!icmp_m) icmp_m = &rte_flow_item_icmp_mask; - /* - * Force flow only to match the non-fragmented IPv4 ICMP packets. - * If only the protocol is specified, no need to match the frag. 
- */ - MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type, icmp_m->hdr.icmp_type); MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type, @@ -7308,6 +7786,17 @@ flow_dv_translate_item_icmp(void *matcher, void *key, icmp_m->hdr.icmp_code); MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code, icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code); + icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb); + icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16; + if (icmp_header_data_m) { + icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb); + icmp_header_data_v |= + rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16; + MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data, + icmp_header_data_m); + MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data, + icmp_header_data_v & icmp_header_data_m); + } } /** @@ -7512,6 +8001,9 @@ static struct mlx5_flow_tbl_resource * flow_dv_tbl_resource_get(struct rte_eth_dev *dev, uint32_t table_id, uint8_t egress, uint8_t transfer, + bool external, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -7536,7 +8028,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, entry); tbl = &tbl_data->tbl; - rte_atomic32_inc(&tbl->refcnt); + __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED); return tbl; } tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); @@ -7548,6 +8040,9 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, return NULL; } tbl_data->idx = idx; + tbl_data->tunnel = tunnel; + tbl_data->group_id = group_id; + tbl_data->external = external; tbl = &tbl_data->tbl; pos = &tbl_data->entry; if (transfer) @@ -7568,9 +8063,9 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, * No multi-threads now, but still better to initialize the reference * count before insert it into the hash list. */ - rte_atomic32_init(&tbl->refcnt); + __atomic_store_n(&tbl->refcnt, 0, __ATOMIC_RELAXED); /* Jump action reference count is initialized here. */ - rte_atomic32_init(&tbl_data->jump.refcnt); + __atomic_store_n(&tbl_data->jump.refcnt, 0, __ATOMIC_RELAXED); pos->key = table_key.v64; ret = mlx5_hlist_insert(sh->flow_tbls, pos); if (ret < 0) { @@ -7580,7 +8075,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, mlx5_flow_os_destroy_flow_tbl(tbl->obj); mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); } - rte_atomic32_inc(&tbl->refcnt); + __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED); return tbl; } @@ -7606,15 +8101,50 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev, if (!tbl) return 0; - if (rte_atomic32_dec_and_test(&tbl->refcnt)) { + if (__atomic_sub_fetch(&tbl->refcnt, 1, __ATOMIC_RELAXED) == 0) { struct mlx5_hlist_entry *pos = &tbl_data->entry; mlx5_flow_os_destroy_flow_tbl(tbl->obj); tbl->obj = NULL; - /* remove the entry from the hash list and free memory. */ - mlx5_hlist_remove(sh->flow_tbls, pos); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], - tbl_data->idx); + if (is_tunnel_offload_active(dev) && tbl_data->external) { + struct mlx5_hlist_entry *he; + struct mlx5_hlist *tunnel_grp_hash; + struct mlx5_flow_tunnel_hub *thub = + mlx5_tunnel_hub(dev); + union tunnel_tbl_key tunnel_key = { + .tunnel_id = tbl_data->tunnel ? 
+ tbl_data->tunnel->tunnel_id : 0, + .group = tbl_data->group_id + }; + union mlx5_flow_tbl_key table_key = { + .v64 = pos->key + }; + uint32_t table_id = table_key.table_id; + + tunnel_grp_hash = tbl_data->tunnel ? + tbl_data->tunnel->groups : + thub->groups; + he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val); + if (he) { + struct tunnel_tbl_entry *tte; + tte = container_of(he, typeof(*tte), hash); + MLX5_ASSERT(tte->flow_table == table_id); + mlx5_hlist_remove(tunnel_grp_hash, he); + mlx5_free(tte); + } + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tunnel_flow_tbl_to_id(table_id)); + DRV_LOG(DEBUG, + "port %u release table_id %#x tunnel %u group %u", + dev->data->port_id, table_id, + tbl_data->tunnel ? + tbl_data->tunnel->tunnel_id : 0, + tbl_data->group_id); + } + /* remove the entry from the hash list and free memory. */ + mlx5_hlist_remove(sh->flow_tbls, pos); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], + tbl_data->idx); return 0; } return 1; @@ -7656,7 +8186,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, int ret; tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, - key->domain, error); + key->domain, false, NULL, 0, error); if (!tbl) return -rte_errno; /* No need to refill the error info */ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); @@ -7674,8 +8204,10 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, cache_matcher->priority, key->direction ? "tx" : "rx", (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); - rte_atomic32_inc(&cache_matcher->refcnt); + __atomic_load_n(&cache_matcher->refcnt, + __ATOMIC_RELAXED)); + __atomic_fetch_add(&cache_matcher->refcnt, 1, + __ATOMIC_RELAXED); dev_flow->handle->dvh.matcher = cache_matcher; /* old matcher should not make the table ref++. */ flow_dv_tbl_resource_release(dev, tbl); @@ -7710,16 +8242,15 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, } /* Save the table information */ cache_matcher->tbl = tbl; - rte_atomic32_init(&cache_matcher->refcnt); /* only matcher ref++, table ref++ already done above in get API. */ - rte_atomic32_inc(&cache_matcher->refcnt); + __atomic_store_n(&cache_matcher->refcnt, 1, __ATOMIC_RELAXED); LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); dev_flow->handle->dvh.matcher = cache_matcher; DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", key->domain ? "FDB" : "NIC", key->table_id, cache_matcher->priority, key->direction ? "tx" : "rx", (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); + __atomic_load_n(&cache_matcher->refcnt, __ATOMIC_RELAXED)); return 0; } @@ -7756,12 +8287,14 @@ flow_dv_tag_resource_register if (entry) { cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_fetch_add(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED); dev_flow->handle->dvh.rix_tag = cache_resource->idx; dev_flow->dv.tag_resource = cache_resource; DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + __atomic_load_n(&cache_resource->refcnt, + __ATOMIC_RELAXED)); return 0; } /* Register new resource. 
*/ @@ -7780,8 +8313,7 @@ flow_dv_tag_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { mlx5_flow_os_destroy_flow_action(cache_resource->action); mlx5_free(cache_resource); @@ -7792,7 +8324,7 @@ flow_dv_tag_resource_register dev_flow->dv.tag_resource = cache_resource; DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); return 0; } @@ -7820,8 +8352,8 @@ flow_dv_tag_release(struct rte_eth_dev *dev, return 0; DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", dev->data->port_id, (void *)tag, - rte_atomic32_read(&tag->refcnt)); - if (rte_atomic32_dec_and_test(&tag->refcnt)) { + __atomic_load_n(&tag->refcnt, __ATOMIC_RELAXED)); + if (__atomic_sub_fetch(&tag->refcnt, 1, __ATOMIC_RELAXED) == 0) { claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); mlx5_hlist_remove(sh->tag_table, &tag->entry); DRV_LOG(DEBUG, "port %u tag %p: removed", @@ -7904,29 +8436,19 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev, uint32_t counter; struct mlx5_age_param *age_param; - counter = flow_dv_counter_alloc(dev, - count ? count->shared : 0, - count ? count->id : 0, - dev_flow->dv.group, !!age); + if (count && count->shared) + counter = flow_dv_counter_get_shared(dev, count->id); + else + counter = flow_dv_counter_alloc(dev, !!age); if (!counter || age == NULL) return counter; age_param = flow_dv_counter_idx_get_age(dev, counter); - /* - * The counter age accuracy may have a bit delay. Have 3/4 - * second bias on the timeount in order to let it age in time. - */ age_param->context = age->context ? age->context : (void *)(uintptr_t)(dev_flow->flow_idx); - /* - * The counter age accuracy may have a bit delay. Have 3/4 - * second bias on the timeount in order to let it age in time. - */ - age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY; - /* Set expire time in unit of 0.1 sec. */ + age_param->timeout = age->timeout; age_param->port_id = dev->data->port_id; - age_param->expire = age_param->timeout + - rte_rdtsc() / (rte_get_tsc_hz() / 10); - rte_atomic16_set(&age_param->state, AGE_CANDIDATE); + __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED); + __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED); return counter; } /** @@ -8046,19 +8568,360 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow, } /** - * Fill the flow with DV spec, lock free - * (mutex should be acquired by caller). + * Create an Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] dev_flow + * Pointer to the mlx5_flow. + * @param[in] rss_desc + * Pointer to the mlx5_flow_rss_desc. + * @param[out] hrxq_idx + * Hash Rx queue index. + * + * @return + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. 
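+ *
+ * A sketch of the intended call pattern, mirroring the sample queue
+ * path below (illustrative only):
+ * @code
+ * uint32_t hrxq_idx;
+ * struct mlx5_hrxq *hrxq;
+ *
+ * hrxq = flow_dv_handle_rx_queue(dev, dev_flow, rss_desc, &hrxq_idx);
+ * if (!hrxq)
+ *	return rte_flow_error_set(error, rte_errno,
+ *				  RTE_FLOW_ERROR_TYPE_ACTION,
+ *				  NULL, "cannot create fate queue");
+ * @endcode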
+ */
+static struct mlx5_hrxq *
+flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
+			struct mlx5_flow *dev_flow,
+			struct mlx5_flow_rss_desc *rss_desc,
+			uint32_t *hrxq_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_handle *dh = dev_flow->handle;
+	struct mlx5_hrxq *hrxq;
+
+	MLX5_ASSERT(rss_desc->queue_num);
+	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
+				  dev_flow->hash_fields,
+				  rss_desc->queue, rss_desc->queue_num);
+	if (!*hrxq_idx) {
+		*hrxq_idx = mlx5_hrxq_new
+				(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
+				 dev_flow->hash_fields,
+				 rss_desc->queue, rss_desc->queue_num,
+				 !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL),
+				 false);
+		if (!*hrxq_idx)
+			return NULL;
+	}
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+			      *hrxq_idx);
+	return hrxq;
+}
+
+/**
+ * Find existing sample resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[in] resource
+ *   Pointer to sample resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[in, out] sample_dv_actions
+ *   Pointer to sample actions list.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_sample_resource_register(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			struct mlx5_flow_dv_sample_resource *resource,
+			struct mlx5_flow *dev_flow,
+			void **sample_dv_actions,
+			struct rte_flow_error *error)
+{
+	struct mlx5_flow_dv_sample_resource *cache_resource;
+	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_flow_tbl_resource *tbl;
+	uint32_t idx = 0;
+	const uint32_t next_ft_step = 1;
+	uint32_t next_ft_id = resource->ft_id + next_ft_step;
+
+	/* Lookup a matching resource from cache. */
+	ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
+		      idx, cache_resource, next) {
+		if (resource->ratio == cache_resource->ratio &&
+		    resource->ft_type == cache_resource->ft_type &&
+		    resource->ft_id == cache_resource->ft_id &&
+		    resource->set_action == cache_resource->set_action &&
+		    !memcmp((void *)&resource->sample_act,
+			    (void *)&cache_resource->sample_act,
+			    sizeof(struct mlx5_flow_sub_actions_list))) {
+			DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
+				(void *)cache_resource,
+				__atomic_load_n(&cache_resource->refcnt,
+						__ATOMIC_RELAXED));
+			__atomic_fetch_add(&cache_resource->refcnt, 1,
+					   __ATOMIC_RELAXED);
+			dev_flow->handle->dvh.rix_sample = idx;
+			dev_flow->dv.sample_res = cache_resource;
+			return 0;
+		}
+	}
+	/* Register new sample resource. */
+	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
+					    &dev_flow->handle->dvh.rix_sample);
+	if (!cache_resource)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "cannot allocate resource memory");
+	*cache_resource = *resource;
+	/* Create normal path table level */
+	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
+					attr->egress, attr->transfer,
+					dev_flow->external, NULL, 0, error);
+	if (!tbl) {
+		rte_flow_error_set(error, ENOMEM,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"fail to create normal path table "
+					"for sample");
+		goto error;
+	}
+	cache_resource->normal_path_tbl = tbl;
+	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+		cache_resource->default_miss =
+				mlx5_glue->dr_create_flow_action_default_miss();
+		if (!cache_resource->default_miss) {
+			rte_flow_error_set(error, ENOMEM,
+						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+						NULL,
+						"cannot create default miss "
+						"action");
+			goto error;
+		}
+		sample_dv_actions[resource->sample_act.actions_num++] =
+						cache_resource->default_miss;
+	}
+	/* Create a DR sample action */
+	sampler_attr.sample_ratio = cache_resource->ratio;
+	sampler_attr.default_next_table = tbl->obj;
+	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
+	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
+						&sample_dv_actions[0];
+	sampler_attr.action = cache_resource->set_action;
+	cache_resource->verbs_action =
+		mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
+	if (!cache_resource->verbs_action) {
+		rte_flow_error_set(error, ENOMEM,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL, "cannot create sample action");
+		goto error;
+	}
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+	ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
+		     dev_flow->handle->dvh.rix_sample, cache_resource,
+		     next);
+	dev_flow->dv.sample_res = cache_resource;
+	DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
+		(void *)cache_resource,
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	return 0;
+error:
+	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+		if (cache_resource->default_miss)
+			claim_zero(mlx5_glue->destroy_flow_action
+				(cache_resource->default_miss));
+	} else {
+		if (cache_resource->sample_idx.rix_hrxq &&
+		    !mlx5_hrxq_release(dev,
+				cache_resource->sample_idx.rix_hrxq))
+			cache_resource->sample_idx.rix_hrxq = 0;
+		if (cache_resource->sample_idx.rix_tag &&
+		    !flow_dv_tag_release(dev,
+				cache_resource->sample_idx.rix_tag))
+			cache_resource->sample_idx.rix_tag = 0;
+		if (cache_resource->sample_idx.cnt) {
+			flow_dv_counter_release(dev,
+				cache_resource->sample_idx.cnt);
+			cache_resource->sample_idx.cnt = 0;
+		}
+	}
+	if (cache_resource->normal_path_tbl)
+		flow_dv_tbl_resource_release(dev,
+				cache_resource->normal_path_tbl);
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
+			dev_flow->handle->dvh.rix_sample);
+	dev_flow->handle->dvh.rix_sample = 0;
+	return -rte_errno;
+}
+
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[in] resource
+ *   Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno and errno is set.
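+ *
+ * A simplified sketch of the expected invocation from the sample
+ * translation path (illustrative only):
+ * @code
+ * if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
+ *					     dev_flow, error))
+ *	return -rte_errno;
+ * @endcode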
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+			 const struct rte_flow_attr *attr,
+			 struct mlx5_flow_dv_dest_array_resource *resource,
+			 struct mlx5_flow *dev_flow,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_flow_dv_dest_array_resource *cache_resource;
+	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
+	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_flow_sub_actions_list *sample_act;
+	struct mlx5dv_dr_domain *domain;
+	uint32_t idx = 0;
+
+	/* Lookup a matching resource from cache. */
+	ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+		      sh->dest_array_list,
+		      idx, cache_resource, next) {
+		if (resource->num_of_dest == cache_resource->num_of_dest &&
+		    resource->ft_type == cache_resource->ft_type &&
+		    !memcmp((void *)cache_resource->sample_act,
+			    (void *)resource->sample_act,
+			   (resource->num_of_dest *
+			   sizeof(struct mlx5_flow_sub_actions_list)))) {
+			DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
+				(void *)cache_resource,
+				__atomic_load_n(&cache_resource->refcnt,
+						__ATOMIC_RELAXED));
+			__atomic_fetch_add(&cache_resource->refcnt, 1,
+					   __ATOMIC_RELAXED);
+			dev_flow->handle->dvh.rix_dest_array = idx;
+			dev_flow->dv.dest_array_res = cache_resource;
+			return 0;
+		}
+	}
+	/* Register new destination array resource. */
+	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+					    &dev_flow->handle->dvh.rix_dest_array);
+	if (!cache_resource)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "cannot allocate resource memory");
+	*cache_resource = *resource;
+	if (attr->transfer)
+		domain = sh->fdb_domain;
+	else if (attr->ingress)
+		domain = sh->rx_domain;
+	else
+		domain = sh->tx_domain;
+	for (idx = 0; idx < resource->num_of_dest; idx++) {
+		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
+				 mlx5_malloc(MLX5_MEM_ZERO,
+				 sizeof(struct mlx5dv_dr_action_dest_attr),
+				 0, SOCKET_ID_ANY);
+		if (!dest_attr[idx]) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					   NULL,
+					   "cannot allocate resource memory");
+			goto error;
+		}
+		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
+		sample_act = &resource->sample_act[idx];
+		if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
+			dest_attr[idx]->dest = sample_act->dr_queue_action;
+		} else if (sample_act->action_flags ==
+			  (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
+			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
+			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
+			dest_attr[idx]->dest_reformat->reformat =
+					sample_act->dr_encap_action;
+			dest_attr[idx]->dest_reformat->dest =
+					sample_act->dr_port_id_action;
+		} else if (sample_act->action_flags ==
+			   MLX5_FLOW_ACTION_PORT_ID) {
+			dest_attr[idx]->dest = sample_act->dr_port_id_action;
+		}
+	}
+	/* Create a dest array action. */
+	cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+						(domain,
+						 cache_resource->num_of_dest,
+						 dest_attr);
+	if (!cache_resource->action) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot create destination array action");
+		goto error;
+	}
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+	ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+		     &sh->dest_array_list,
+		     dev_flow->handle->dvh.rix_dest_array, cache_resource,
+		     next);
+	dev_flow->dv.dest_array_res = cache_resource;
+	DRV_LOG(DEBUG, "new destination 
array resource %p: refcnt %d++", + (void *)cache_resource, + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); + for (idx = 0; idx < resource->num_of_dest; idx++) + mlx5_free(dest_attr[idx]); + return 0; +error: + for (idx = 0; idx < resource->num_of_dest; idx++) { + struct mlx5_flow_sub_actions_idx *act_res = + &cache_resource->sample_idx[idx]; + if (act_res->rix_hrxq && + !mlx5_hrxq_release(dev, + act_res->rix_hrxq)) + act_res->rix_hrxq = 0; + if (act_res->rix_encap_decap && + !flow_dv_encap_decap_resource_release(dev, + act_res->rix_encap_decap)) + act_res->rix_encap_decap = 0; + if (act_res->rix_port_id_action && + !flow_dv_port_id_action_resource_release(dev, + act_res->rix_port_id_action)) + act_res->rix_port_id_action = 0; + if (dest_attr[idx]) + mlx5_free(dest_attr[idx]); + } + + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], + dev_flow->handle->dvh.rix_dest_array); + dev_flow->handle->dvh.rix_dest_array = 0; + return -rte_errno; +} + +/** + * Convert Sample action to DV specification. * * @param[in] dev * Pointer to rte_eth_dev structure. + * @param[in] action + * Pointer to action structure. * @param[in, out] dev_flow - * Pointer to the sub flow. + * Pointer to the mlx5_flow. * @param[in] attr * Pointer to the flow attributes. - * @param[in] items - * Pointer to the list of items. - * @param[in] actions - * Pointer to the list of actions. + * @param[in, out] num_of_dest + * Pointer to the num of destination. + * @param[in, out] sample_actions + * Pointer to sample actions list. + * @param[in, out] res + * Pointer to sample resource. * @param[out] error * Pointer to the error structure. * @@ -8066,93 +8929,474 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow, * 0 on success, a negative errno value otherwise and rte_errno is set. 
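 *
 * An illustrative sample action definition that would reach this
 * translation (hypothetical application code; values are examples only):
 * @code
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action sub_acts[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action_sample conf = {
 *	.ratio = 2,
 *	.actions = sub_acts,
 * };
 * @endcode
 * A ratio of 2 samples one of every two packets; a ratio of 1 with an
 * additional fate action gives full mirroring (the dest array path).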
*/ static int -__flow_dv_translate(struct rte_eth_dev *dev, - struct mlx5_flow *dev_flow, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +flow_dv_translate_action_sample(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + uint32_t *num_of_dest, + void **sample_actions, + struct mlx5_flow_dv_sample_resource *res, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *dev_conf = &priv->config; - struct rte_flow *flow = dev_flow->flow; - struct mlx5_flow_handle *handle = dev_flow->handle; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc) - [!!priv->flow_nested_idx]; - uint64_t item_flags = 0; - uint64_t last_item = 0; + const struct rte_flow_action_sample *sample_action; + const struct rte_flow_action *sub_actions; + const struct rte_flow_action_queue *queue; + struct mlx5_flow_sub_actions_list *sample_act; + struct mlx5_flow_sub_actions_idx *sample_idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; uint64_t action_flags = 0; - uint64_t priority = attr->priority; - struct mlx5_flow_dv_matcher matcher = { - .mask = { - .size = sizeof(matcher.mask.buf) - - MLX5_ST_SZ_BYTES(fte_match_set_misc4), - }, - }; - int actions_n = 0; - bool actions_end = false; - union { - struct mlx5_flow_dv_modify_hdr_resource res; - uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) + - sizeof(struct mlx5_modification_cmd) * - (MLX5_MAX_MODIFY_NUM + 1)]; - } mhdr_dummy; - struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; - const struct rte_flow_action_count *count = NULL; - const struct rte_flow_action_age *age = NULL; - union flow_dv_attr flow_attr = { .attr = 0 }; - uint32_t tag_be; - union mlx5_flow_tbl_key tbl_key; - uint32_t modify_action_position = UINT32_MAX; - void *match_mask = matcher.mask.buf; - void *match_value = dev_flow->dv.value.buf; - uint8_t next_protocol = 0xff; - struct rte_vlan_hdr vlan = { 0 }; - uint32_t table; - int ret = 0; - mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : - MLX5DV_FLOW_TABLE_TYPE_NIC_RX; - ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group, - !!priv->fdb_def_rule, &table, error); - if (ret) - return ret; - dev_flow->dv.group = table; - if (attr->transfer) - mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; - if (priority == MLX5_FLOW_PRIO_RSVD) - priority = dev_conf->flow_prio - 1; - /* number of actions must be set to 0 in case of dirty stack. 
*/ - mhdr_res->actions_num = 0; - for (; !actions_end ; actions++) { - const struct rte_flow_action_queue *queue; - const struct rte_flow_action_rss *rss; - const struct rte_flow_action *action = actions; - const uint8_t *rss_key; - const struct rte_flow_action_jump *jump_data; - const struct rte_flow_action_meter *mtr; - struct mlx5_flow_tbl_resource *tbl; - uint32_t port_id = 0; - struct mlx5_flow_dv_port_id_action_resource port_id_resource; - int action_type = actions->type; - const struct rte_flow_action *found_action = NULL; - struct mlx5_flow_meter *fm = NULL; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; + sample_act = &res->sample_act; + sample_idx = &res->sample_idx; + sample_action = (const struct rte_flow_action_sample *)action->conf; + res->ratio = sample_action->ratio; + sub_actions = sample_action->actions; + for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) { + int type = sub_actions->type; + uint32_t pre_rix = 0; + void *pre_r; + switch (type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + { + struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx; - if (!mlx5_flow_os_action_supported(action_type)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "action not supported"); - switch (action_type) { - case RTE_FLOW_ACTION_TYPE_VOID: + queue = sub_actions->conf; + rss_desc->queue_num = 1; + rss_desc->queue[0] = queue->index; + hrxq = flow_dv_handle_rx_queue(dev, dev_flow, + rss_desc, &hrxq_idx); + if (!hrxq) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create fate queue"); + sample_act->dr_queue_action = hrxq->action; + sample_idx->rix_hrxq = hrxq_idx; + sample_actions[sample_act->actions_num++] = + hrxq->action; + (*num_of_dest)++; + action_flags |= MLX5_FLOW_ACTION_QUEUE; + if (action_flags & MLX5_FLOW_ACTION_MARK) + dev_flow->handle->rix_hrxq = hrxq_idx; + dev_flow->handle->fate_action = + MLX5_FLOW_FATE_QUEUE; break; - case RTE_FLOW_ACTION_TYPE_PORT_ID: - if (flow_dv_translate_action_port_id(dev, action, - &port_id, error)) + } + case RTE_FLOW_ACTION_TYPE_MARK: + { + uint32_t tag_be = mlx5_flow_mark_set + (((const struct rte_flow_action_mark *) + (sub_actions->conf))->id); + + dev_flow->handle->mark = 1; + pre_rix = dev_flow->handle->dvh.rix_tag; + /* Save the mark resource before sample */ + pre_r = dev_flow->dv.tag_resource; + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) return -rte_errno; - port_id_resource.port_id = port_id; - MLX5_ASSERT(!handle->rix_port_id_action); + MLX5_ASSERT(dev_flow->dv.tag_resource); + sample_act->dr_tag_action = + dev_flow->dv.tag_resource->action; + sample_idx->rix_tag = + dev_flow->handle->dvh.rix_tag; + sample_actions[sample_act->actions_num++] = + sample_act->dr_tag_action; + /* Recover the mark resource after sample */ + dev_flow->dv.tag_resource = pre_r; + dev_flow->handle->dvh.rix_tag = pre_rix; + action_flags |= MLX5_FLOW_ACTION_MARK; + break; + } + case RTE_FLOW_ACTION_TYPE_COUNT: + { + uint32_t counter; + + counter = flow_dv_translate_create_counter(dev, + dev_flow, sub_actions->conf, 0); + if (!counter) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create counter" + " object."); + sample_idx->cnt = counter; + sample_act->dr_cnt_action = + (flow_dv_counter_get_by_idx(dev, + counter, NULL))->action; + sample_actions[sample_act->actions_num++] = + sample_act->dr_cnt_action; + action_flags |= MLX5_FLOW_ACTION_COUNT; + break; + } + case 
RTE_FLOW_ACTION_TYPE_PORT_ID:
+ {
+ struct mlx5_flow_dv_port_id_action_resource
+ port_id_resource;
+ uint32_t port_id = 0;
+
+ memset(&port_id_resource, 0, sizeof(port_id_resource));
+ /* Save the port id resource before sample */
+ pre_rix = dev_flow->handle->rix_port_id_action;
+ pre_r = dev_flow->dv.port_id_action;
+ if (flow_dv_translate_action_port_id(dev, sub_actions,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ sample_idx->rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_port_id_action;
+ /* Recover the port id resource after sample */
+ dev_flow->dv.port_id_action = pre_r;
+ dev_flow->handle->rix_port_id_action = pre_rix;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Save the encap resource before sample */
+ pre_rix = dev_flow->handle->dvh.rix_encap_decap;
+ pre_r = dev_flow->dv.encap_decap;
+ if (flow_dv_create_action_l2_encap(dev, sub_actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ sample_idx->rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_encap_action;
+ /* Recover the encap resource after sample */
+ dev_flow->dv.encap_decap = pre_r;
+ dev_flow->handle->dvh.rix_encap_decap = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "action not supported in sampler");
+ }
+ }
+ sample_act->action_flags = action_flags;
+ res->ft_id = dev_flow->dv.group;
+ if (attr->transfer) {
+ union {
+ uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
+ uint64_t set_action;
+ } action_ctx = { .set_action = 0 };
+
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ MLX5_SET(set_action_in, action_ctx.action_in, action_type,
+ MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, action_ctx.action_in, field,
+ MLX5_MODI_META_REG_C_0);
+ MLX5_SET(set_action_in, action_ctx.action_in, data,
+ priv->vport_meta_tag);
+ res->set_action = action_ctx.set_action;
+ } else if (attr->ingress) {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ }
+ return 0;
+}
+
+/**
+ * Convert Sample action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] num_of_dest
+ * The number of destinations.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[in, out] mdest_res
+ * Pointer to destination array resource.
+ * @param[in] sample_actions
+ * Pointer to sample path actions list.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */ +static int +flow_dv_create_action_sample(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + uint32_t num_of_dest, + struct mlx5_flow_dv_sample_resource *res, + struct mlx5_flow_dv_dest_array_resource *mdest_res, + void **sample_actions, + uint64_t action_flags, + struct rte_flow_error *error) +{ + /* update normal path action resource into last index of array */ + uint32_t dest_index = MLX5_MAX_DEST_NUM - 1; + struct mlx5_flow_sub_actions_list *sample_act = + &mdest_res->sample_act[dest_index]; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; + uint32_t normal_idx = 0; + struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx; + + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; + if (num_of_dest > 1) { + if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) { + /* Handle QP action for mirroring */ + hrxq = flow_dv_handle_rx_queue(dev, dev_flow, + rss_desc, &hrxq_idx); + if (!hrxq) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create rx queue"); + normal_idx++; + mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx; + sample_act->dr_queue_action = hrxq->action; + if (action_flags & MLX5_FLOW_ACTION_MARK) + dev_flow->handle->rix_hrxq = hrxq_idx; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + } + if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) { + normal_idx++; + mdest_res->sample_idx[dest_index].rix_encap_decap = + dev_flow->handle->dvh.rix_encap_decap; + sample_act->dr_encap_action = + dev_flow->dv.encap_decap->action; + } + if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) { + normal_idx++; + mdest_res->sample_idx[dest_index].rix_port_id_action = + dev_flow->handle->rix_port_id_action; + sample_act->dr_port_id_action = + dev_flow->dv.port_id_action->action; + } + sample_act->actions_num = normal_idx; + /* update sample action resource into first index of array */ + mdest_res->ft_type = res->ft_type; + memcpy(&mdest_res->sample_idx[0], &res->sample_idx, + sizeof(struct mlx5_flow_sub_actions_idx)); + memcpy(&mdest_res->sample_act[0], &res->sample_act, + sizeof(struct mlx5_flow_sub_actions_list)); + mdest_res->num_of_dest = num_of_dest; + if (flow_dv_dest_array_resource_register(dev, attr, mdest_res, + dev_flow, error)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "can't create sample " + "action"); + } else { + if (flow_dv_sample_resource_register(dev, attr, res, dev_flow, + sample_actions, error)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "can't create sample action"); + } + return 0; +} + +/** + * Fill the flow with DV spec, lock free + * (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in, out] dev_flow + * Pointer to the sub flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
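+ *
+ * Note: the translation below runs in two passes, first over the action
+ * list to build dev_flow->dv.actions, then over the item list to fill
+ * the matcher mask and value buffers.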
+ */
+static int
+__flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint64_t item_flags = 0;
+ uint64_t last_item = 0;
+ uint64_t action_flags = 0;
+ uint64_t priority = attr->priority;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ },
+ };
+ int actions_n = 0;
+ bool actions_end = false;
+ union {
+ struct mlx5_flow_dv_modify_hdr_resource res;
+ uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ sizeof(struct mlx5_modification_cmd) *
+ (MLX5_MAX_MODIFY_NUM + 1)];
+ } mhdr_dummy;
+ struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_age *age = NULL;
+ union flow_dv_attr flow_attr = { .attr = 0 };
+ uint32_t tag_be;
+ union mlx5_flow_tbl_key tbl_key;
+ uint32_t modify_action_position = UINT32_MAX;
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+ uint8_t next_protocol = 0xff;
+ struct rte_vlan_hdr vlan = { 0 };
+ struct mlx5_flow_dv_dest_array_resource mdest_res;
+ struct mlx5_flow_dv_sample_resource sample_res;
+ void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ struct mlx5_flow_sub_actions_list *sample_act;
+ uint32_t sample_act_pos = UINT32_MAX;
+ uint32_t num_of_dest = 0;
+ int tmp_actions_n = 0;
+ uint32_t table;
+ int ret = 0;
+ const struct mlx5_flow_tunnel *tunnel;
+ struct flow_grp_info grp_info = {
+ .external = !!dev_flow->external,
+ .transfer = !!attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ };
+
+ MLX5_ASSERT(wks);
+ rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+ memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
+ memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
+ mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ /* update normal path action resource into last index of array */
+ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
+ tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
+ flow_items_to_tunnel(items) :
+ is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
+ flow_actions_to_tunnel(actions) :
+ dev_flow->tunnel ? dev_flow->tunnel : NULL;
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, tunnel, attr, items, actions);
+ ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+ grp_info, error);
+ if (ret)
+ return ret;
+ dev_flow->dv.group = table;
+ if (attr->transfer)
+ mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = dev_conf->flow_prio - 1;
+ /* number of actions must be set to 0 in case of dirty stack.
*/
+ mhdr_res->actions_num = 0;
+ if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ /*
+ * Do not add decap action if the match rule drops packets;
+ * HW rejects rules with decap & drop.
+ */
+ bool add_decap = true;
+ const struct rte_flow_action *ptr = actions;
+ struct mlx5_flow_tbl_resource *tbl;
+
+ for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
+ if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ add_decap = false;
+ break;
+ }
+ }
+ if (add_decap) {
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ }
+ /*
+ * Bind the table_id for the tunnel match rule.
+ * The tunnel set rule establishes that binding in the JUMP
+ * action handler. Required for the scenario when the
+ * application creates the tunnel match rule before the
+ * tunnel set rule.
+ */
+ tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
+ attr->transfer,
+ !!dev_flow->external, tunnel,
+ attr->group, error);
+ if (!tbl)
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "cannot register tunnel group");
+ }
+ for (; !actions_end ; actions++) {
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action *action = actions;
+ const uint8_t *rss_key;
+ const struct rte_flow_action_meter *mtr;
+ struct mlx5_flow_tbl_resource *tbl;
+ uint32_t port_id = 0;
+ struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+ int action_type = actions->type;
+ const struct rte_flow_action *found_action = NULL;
+ struct mlx5_flow_meter *fm = NULL;
+ uint32_t jump_group = 0;
+
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ switch (action_type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ if (flow_dv_translate_action_port_id(dev, action,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ MLX5_ASSERT(!handle->rix_port_id_action);
 if (flow_dv_port_id_action_resource_register
 (dev, &port_id_resource, dev_flow, error))
 return -rte_errno;
@@ -8160,6 +9404,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 dev_flow->dv.port_id_action->action;
 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ num_of_dest++;
 break;
 case RTE_FLOW_ACTION_TYPE_FLAG:
 action_flags |= MLX5_FLOW_ACTION_FLAG;
@@ -8245,6 +9491,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 rss_desc->queue[0] = queue->index;
 action_flags |= MLX5_FLOW_ACTION_QUEUE;
 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ num_of_dest++;
 break;
 case RTE_FLOW_ACTION_TYPE_RSS:
 rss = actions->conf;
@@ -8332,6 +9580,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 dev_flow->dv.actions[actions_n++] =
 dev_flow->dv.encap_decap->action;
 action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
 break;
 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
@@ -8361,6 +9612,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 dev_flow->dv.encap_decap->action;
 }
 action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
 break;
 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
@@ -8376,16 +9630,20 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 action_flags |= MLX5_FLOW_ACTION_DECAP;
 break;
 case RTE_FLOW_ACTION_TYPE_JUMP:
- jump_data = action->conf;
- ret = mlx5_flow_group_to_table(attr, dev_flow->external,
- jump_data->group,
- !!priv->fdb_def_rule,
- &table, error);
+ jump_group = ((const struct rte_flow_action_jump *)
+ action->conf)->group;
+ grp_info.std_tbl_fix = 0;
+ ret = mlx5_flow_group_to_table(dev, tunnel,
+ jump_group,
+ &table,
+ grp_info, error);
 if (ret)
 return ret;
- tbl = flow_dv_tbl_resource_get(dev, table,
- attr->egress,
- attr->transfer, error);
+ tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
+ attr->transfer,
+ !!dev_flow->external,
+ tunnel, jump_group,
+ error);
 if (!tbl)
 return rte_flow_error_set
 (error, errno,
@@ -8544,6 +9802,25 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 return -rte_errno;
 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
 break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ sample_act_pos = actions_n;
+ ret = flow_dv_translate_action_sample(dev,
+ actions,
+ dev_flow, attr,
+ &num_of_dest,
+ sample_actions,
+ &sample_res,
+ error);
+ if (ret < 0)
+ return ret;
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ /* Put encap action into the group if used with port id. */
+ if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
+ break;
 case RTE_FLOW_ACTION_TYPE_END:
 actions_end = true;
 if (mhdr_res->actions_num) {
@@ -8566,9 +9843,33 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 NULL,
 "cannot create counter"
 " object.");
- dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.actions[actions_n] =
 (flow_dv_counter_get_by_idx(dev,
 flow->counter, NULL))->action;
+ actions_n++;
+ }
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
+ ret = flow_dv_create_action_sample(dev,
+ dev_flow, attr,
+ num_of_dest,
+ &sample_res,
+ &mdest_res,
+ sample_actions,
+ action_flags,
+ error);
+ if (ret < 0)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create sample action");
+ if (num_of_dest > 1) {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.dest_array_res->action;
+ } else {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.sample_res->verbs_action;
+ }
+ }
 break;
 default:
 break;
@@ -8578,6 +9879,31 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 modify_action_position == UINT32_MAX)
 modify_action_position = actions_n++;
 }
+ /*
+ * For multiple destinations (sample action with ratio=1), the encap
+ * and port id actions will be combined into a group action.
+ * So these original actions must be removed from the flow and only
+ * the sample action is used instead.
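+ * For example, an action list of [RAW_ENCAP, PORT_ID, SAMPLE(ratio=1)]
+ * ends up with the sample action only, while the encap and port id
+ * handles are carried inside the destination array resource.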
+ */
+ if (num_of_dest > 1 && sample_act->dr_port_id_action) {
+ int i;
+ void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+
+ for (i = 0; i < actions_n; i++) {
+ if ((sample_act->dr_encap_action &&
+ sample_act->dr_encap_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_port_id_action &&
+ sample_act->dr_port_id_action ==
+ dev_flow->dv.actions[i]))
+ continue;
+ temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
+ }
+ memcpy((void *)dev_flow->dv.actions,
+ (void *)temp_actions,
+ tmp_actions_n * sizeof(void *));
+ actions_n = tmp_actions_n;
+ }
 dev_flow->dv.actions_n = actions_n;
 dev_flow->act_flags = action_flags;
 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
@@ -8621,7 +9947,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 mlx5_flow_tunnel_ip_check(items, next_protocol,
 &item_flags, &tunnel);
 flow_dv_translate_item_ipv4(match_mask, match_value,
- items, item_flags, tunnel,
+ items, tunnel,
 dev_flow->dv.group);
 matcher.priority = MLX5_PRIORITY_MAP_L3;
 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -8644,7 +9970,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 mlx5_flow_tunnel_ip_check(items, next_protocol,
 &item_flags, &tunnel);
 flow_dv_translate_item_ipv6(match_mask, match_value,
- items, item_flags, tunnel,
+ items, tunnel,
 dev_flow->dv.group);
 matcher.priority = MLX5_PRIORITY_MAP_L3;
 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
@@ -8663,6 +9989,27 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 next_protocol = 0xff;
 }
 break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ flow_dv_translate_item_ipv6_frag_ext(match_mask,
+ match_value,
+ items, tunnel);
+ last_item = tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->spec)->hdr.next_header;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
 case RTE_FLOW_ITEM_TYPE_TCP:
 flow_dv_translate_item_tcp(match_mask, match_value,
 items, tunnel);
@@ -8825,45 +10172,200 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 }
 
 /**
- * Apply the flow to the NIC, lock free,
- * (mutex should be acquired by caller).
+ * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
 *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
+ * @param[in, out] action
+ * Shared RSS action holding hash RX queue objects.
+ * @param[in] hash_fields
+ * Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ * Tunnel type.
+ * @param[in] hrxq_idx
+ * Hash RX queue index to set.
 *
 * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * 0 on success, otherwise negative errno value.
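+ *
+ * The array keeps one fixed slot per hash type: [0] IPv4, [1] IPv4-TCP,
+ * [2] IPv4-UDP, [3] IPv6, [4] IPv6-TCP, [5] IPv6-UDP, [6] no hash.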
*/
 static int
-__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
+__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
+ const uint64_t hash_fields,
+ const int tunnel,
+ uint32_t hrxq_idx)
 {
- struct mlx5_flow_dv_workspace *dv;
- struct mlx5_flow_handle *dh;
- struct mlx5_flow_handle_dv *dv_h;
- struct mlx5_flow *dev_flow;
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t handle_idx;
- int n;
- int err;
- int idx;
+ uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
 
- for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
- dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
- dv = &dev_flow->dv;
- dh = dev_flow->handle;
- dv_h = &dh->dvh;
- n = dv->actions_n;
- if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
+ switch (hash_fields & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ hrxqs[0] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV4_TCP:
+ hrxqs[1] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV4_UDP:
+ hrxqs[2] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6:
+ hrxqs[3] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_TCP:
+ hrxqs[4] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_UDP:
+ hrxqs[5] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_NONE:
+ hrxqs[6] = hrxq_idx;
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/**
+ * Look up hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
+ *
+ * @param[in] action
+ * Shared RSS action holding hash RX queue objects.
+ * @param[in] hash_fields
+ * Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ * Tunnel type.
+ *
+ * @return
+ * Valid hash RX queue index, otherwise 0.
+ */
+static uint32_t
+__flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
+ const uint64_t hash_fields,
+ const int tunnel)
+{
+ const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+
+ switch (hash_fields & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ return hrxqs[0];
+ case MLX5_RSS_HASH_IPV4_TCP:
+ return hrxqs[1];
+ case MLX5_RSS_HASH_IPV4_UDP:
+ return hrxqs[2];
+ case MLX5_RSS_HASH_IPV6:
+ return hrxqs[3];
+ case MLX5_RSS_HASH_IPV6_TCP:
+ return hrxqs[4];
+ case MLX5_RSS_HASH_IPV6_UDP:
+ return hrxqs[5];
+ case MLX5_RSS_HASH_NONE:
+ return hrxqs[6];
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Retrieves hash RX queue suitable for the *flow*.
+ * If a shared action is configured for the *flow*, the suitable hash RX
+ * queue is retrieved from the attached shared action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] flow
+ * Pointer to flow structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[out] hrxq
+ * Pointer to retrieved hash RX queue object.
+ *
+ * @return
+ * Valid hash RX queue index, otherwise 0 and rte_errno is set.
+ */
+static uint32_t
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct mlx5_flow *dev_flow,
+ struct mlx5_hrxq **hrxq)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ uint32_t hrxq_idx;
+
+ if (flow->shared_rss) {
+ hrxq_idx = __flow_dv_action_rss_hrxq_lookup
+ (flow->shared_rss, dev_flow->hash_fields,
+ !!(dev_flow->handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (hrxq_idx) {
+ *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
+ __atomic_fetch_add(&(*hrxq)->refcnt, 1,
+ __ATOMIC_RELAXED);
+ }
+ } else {
+ struct mlx5_flow_rss_desc *rss_desc =
+ &wks->rss_desc[!!wks->flow_nested_idx];
+
+ MLX5_ASSERT(rss_desc->queue_num);
+ hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue, rss_desc->queue_num);
+ if (!hrxq_idx) {
+ hrxq_idx = mlx5_hrxq_new(dev,
+ rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num,
+ !!(dev_flow->handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL),
+ false);
+ }
+ *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
+ }
+ return hrxq_idx;
+}
+
+/**
+ * Apply the flow to the NIC, lock free,
+ * (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_workspace *dv;
+ struct mlx5_flow_handle *dh;
+ struct mlx5_flow_handle_dv *dv_h;
+ struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ int n;
+ int err;
+ int idx;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+
+ MLX5_ASSERT(wks);
+ for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
+ dev_flow = &wks->flows[idx];
+ dv = &dev_flow->dv;
+ dh = dev_flow->handle;
+ dv_h = &dh->dvh;
+ n = dv->actions_n;
+ if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
 if (dv->transfer) {
 dv->actions[n++] = priv->sh->esw_drop_action;
 } else {
 struct mlx5_hrxq *drop_hrxq;
- drop_hrxq = mlx5_hrxq_drop_new(dev);
+ drop_hrxq = mlx5_drop_action_create(dev);
 if (!drop_hrxq) {
 rte_flow_error_set
 (error, errno,
@@ -8874,38 +10376,19 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 }
 /*
 * Drop queues will be released by the dedicated
- * mlx5_hrxq_drop_release() function. Assign
+ * mlx5_drop_action_destroy() function. Assign
 * the special index to hrxq to mark that the queue
 * has been allocated.
*/ dh->rix_hrxq = UINT32_MAX; dv->actions[n++] = drop_hrxq->action; } - } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { - struct mlx5_hrxq *hrxq; - uint32_t hrxq_idx; - struct mlx5_flow_rss_desc *rss_desc = - &((struct mlx5_flow_rss_desc *)priv->rss_desc) - [!!priv->flow_nested_idx]; + } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && + !dv_h->rix_sample && !dv_h->rix_dest_array) { + struct mlx5_hrxq *hrxq = NULL; + uint32_t hrxq_idx = __flow_dv_rss_get_hrxq + (dev, flow, dev_flow, &hrxq); - MLX5_ASSERT(rss_desc->queue_num); - hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, - rss_desc->queue_num); - if (!hrxq_idx) { - hrxq_idx = mlx5_hrxq_new - (dev, rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, - rss_desc->queue_num, - !!(dh->layers & - MLX5_FLOW_LAYER_TUNNEL)); - } - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], - hrxq_idx); if (!hrxq) { rte_flow_error_set (error, rte_errno, @@ -8959,7 +10442,7 @@ error_default_miss: /* hrxq is union, don't clear it if the flag is not set. */ if (dh->rix_hrxq) { if (dh->fate_action == MLX5_FLOW_FATE_DROP) { - mlx5_hrxq_drop_release(dev); + mlx5_drop_action_destroy(dev); dh->rix_hrxq = 0; } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { mlx5_hrxq_release(dev, dh->rix_hrxq); @@ -8993,8 +10476,8 @@ flow_dv_matcher_release(struct rte_eth_dev *dev, MLX5_ASSERT(matcher->matcher_object); DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", dev->data->port_id, (void *)matcher, - rte_atomic32_read(&matcher->refcnt)); - if (rte_atomic32_dec_and_test(&matcher->refcnt)) { + __atomic_load_n(&matcher->refcnt, __ATOMIC_RELAXED)); + if (__atomic_sub_fetch(&matcher->refcnt, 1, __ATOMIC_RELAXED) == 0) { claim_zero(mlx5_flow_os_destroy_flow_matcher (matcher->matcher_object)); LIST_REMOVE(matcher, next); @@ -9013,18 +10496,18 @@ flow_dv_matcher_release(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. - * @param handle - * Pointer to mlx5_flow_handle. + * @param encap_decap_idx + * Index of encap decap resource. * * @return * 1 while a reference on it exists, 0 when freed. 
*/ static int flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, - struct mlx5_flow_handle *handle) + uint32_t encap_decap_idx) { struct mlx5_priv *priv = dev->data->dev_private; - uint32_t idx = handle->dvh.rix_encap_decap; + uint32_t idx = encap_decap_idx; struct mlx5_flow_dv_encap_decap_resource *cache_resource; cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], @@ -9034,13 +10517,13 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); + if (__atomic_sub_fetch(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED) == 0) { claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->action)); - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], - &priv->sh->encaps_decaps, idx, - cache_resource, next); + mlx5_hlist_remove(priv->sh->encaps_decaps, + &cache_resource->entry); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); DRV_LOG(DEBUG, "encap/decap resource %p: removed", (void *)cache_resource); @@ -9076,8 +10559,9 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); + if (__atomic_sub_fetch(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED) == 0) { claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->action)); /* jump action memory free is inside the table release. */ @@ -9108,8 +10592,10 @@ flow_dv_default_miss_resource_release(struct rte_eth_dev *dev) MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--", (void *)cache_resource->action, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + __atomic_load_n(&cache_resource->refcnt, + __ATOMIC_RELAXED)); + if (__atomic_sub_fetch(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED) == 0) { claim_zero(mlx5_glue->destroy_flow_action (cache_resource->action)); DRV_LOG(DEBUG, "default miss resource %p: removed", @@ -9122,6 +10608,8 @@ flow_dv_default_miss_resource_release(struct rte_eth_dev *dev) /** * Release a modify-header resource. * + * @param dev + * Pointer to Ethernet device. * @param handle * Pointer to mlx5_flow_handle. * @@ -9129,19 +10617,23 @@ flow_dv_default_miss_resource_release(struct rte_eth_dev *dev) * 1 while a reference on it exists, 0 when freed. 
*/
 static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
+flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
 {
+ struct mlx5_priv *priv = dev->data->dev_private;
 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
 handle->dvh.modify_hdr;
 
 MLX5_ASSERT(cache_resource->action);
 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
 (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
 claim_zero(mlx5_flow_os_destroy_flow_action
 (cache_resource->action));
- LIST_REMOVE(cache_resource, next);
+ mlx5_hlist_remove(priv->sh->modify_cmds,
+ &cache_resource->entry);
 mlx5_free(cache_resource);
 DRV_LOG(DEBUG, "modify-header resource %p: removed",
 (void *)cache_resource);
@@ -9163,11 +10655,11 @@ flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
 */
 static int
 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+ uint32_t port_id)
 {
 struct mlx5_priv *priv = dev->data->dev_private;
 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->rix_port_id_action;
+ uint32_t idx = port_id;
 
 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
 idx);
@@ -9176,8 +10668,9 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
 MLX5_ASSERT(cache_resource->action);
 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
 (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
 claim_zero(mlx5_flow_os_destroy_flow_action
 (cache_resource->action));
 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
@@ -9217,8 +10710,9 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
 MLX5_ASSERT(cache_resource->action);
 DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
 (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
 claim_zero(mlx5_flow_os_destroy_flow_action
 (cache_resource->action));
 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
@@ -9248,7 +10742,7 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 return;
 switch (handle->fate_action) {
 case MLX5_FLOW_FATE_DROP:
- mlx5_hrxq_drop_release(dev);
+ mlx5_drop_action_destroy(dev);
 break;
 case MLX5_FLOW_FATE_QUEUE:
 mlx5_hrxq_release(dev, handle->rix_hrxq);
@@ -9257,7 +10751,8 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 flow_dv_jump_tbl_resource_release(dev, handle);
 break;
 case MLX5_FLOW_FATE_PORT_ID:
- flow_dv_port_id_action_resource_release(dev, handle);
+ flow_dv_port_id_action_resource_release(dev,
+ handle->rix_port_id_action);
 break;
 case MLX5_FLOW_FATE_DEFAULT_MISS:
 flow_dv_default_miss_resource_release(dev);
@@ -9266,104 +10761,664 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 DRV_LOG(DEBUG, "Incorrect fate action:%d",
 handle->fate_action);
 break;
 }
- handle->rix_fate = 0;
+ handle->rix_fate = 0;
+}
+
+/**
+ * Release a sample resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_sample_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t idx = handle->dvh.rix_sample;
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ idx);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->verbs_action);
+ DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
+ if (cache_resource->verbs_action)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->verbs_action));
+ if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ if (cache_resource->default_miss)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->default_miss));
+ }
+ if (cache_resource->normal_path_tbl)
+ flow_dv_tbl_resource_release(dev,
+ cache_resource->normal_path_tbl);
+ }
+ if (cache_resource->sample_idx.rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ cache_resource->sample_idx.rix_hrxq))
+ cache_resource->sample_idx.rix_hrxq = 0;
+ if (cache_resource->sample_idx.rix_tag &&
+ !flow_dv_tag_release(dev,
+ cache_resource->sample_idx.rix_tag))
+ cache_resource->sample_idx.rix_tag = 0;
+ if (cache_resource->sample_idx.cnt) {
+ flow_dv_counter_release(dev,
+ cache_resource->sample_idx.cnt);
+ cache_resource->sample_idx.cnt = 0;
+ }
+ if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ &priv->sh->sample_action_list, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
+ DRV_LOG(DEBUG, "sample resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Release a destination array resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */ +static int +flow_dv_dest_array_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_handle *handle) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_dv_dest_array_resource *cache_resource; + struct mlx5_flow_sub_actions_idx *mdest_act_res; + uint32_t idx = handle->dvh.rix_dest_array; + uint32_t i = 0; + + cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], + idx); + if (!cache_resource) + return 0; + MLX5_ASSERT(cache_resource->action); + DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--", + (void *)cache_resource, + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); + if (__atomic_sub_fetch(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED) == 0) { + if (cache_resource->action) + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->action)); + for (; i < cache_resource->num_of_dest; i++) { + mdest_act_res = &cache_resource->sample_idx[i]; + if (mdest_act_res->rix_hrxq) { + mlx5_hrxq_release(dev, + mdest_act_res->rix_hrxq); + mdest_act_res->rix_hrxq = 0; + } + if (mdest_act_res->rix_encap_decap) { + flow_dv_encap_decap_resource_release(dev, + mdest_act_res->rix_encap_decap); + mdest_act_res->rix_encap_decap = 0; + } + if (mdest_act_res->rix_port_id_action) { + flow_dv_port_id_action_resource_release(dev, + mdest_act_res->rix_port_id_action); + mdest_act_res->rix_port_id_action = 0; + } + if (mdest_act_res->rix_tag) { + flow_dv_tag_release(dev, + mdest_act_res->rix_tag); + mdest_act_res->rix_tag = 0; + } + } + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], + &priv->sh->dest_array_list, idx, + cache_resource, next); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx); + DRV_LOG(DEBUG, "destination array resource %p: removed", + (void *)cache_resource); + return 0; + } + return 1; +} + +/** + * Remove the flow from the NIC but keeps it in memory. + * Lock free, (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow_handle *dh; + uint32_t handle_idx; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!flow) + return; + handle_idx = flow->dev_handles; + while (handle_idx) { + dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + handle_idx); + if (!dh) + return; + if (dh->drv_flow) { + claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow)); + dh->drv_flow = NULL; + } + if (dh->fate_action == MLX5_FLOW_FATE_DROP || + dh->fate_action == MLX5_FLOW_FATE_QUEUE || + dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) + flow_dv_fate_resource_release(dev, dh); + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); + handle_idx = dh->next.next; + } +} + +/** + * Remove the flow from the NIC and the memory. + * Lock free, (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. 
+ */ +static void +__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct rte_flow_shared_action *shared; + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!flow) + return; + __flow_dv_remove(dev, flow); + shared = mlx5_flow_get_shared_rss(flow); + if (shared) + __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED); + if (flow->counter) { + flow_dv_counter_release(dev, flow->counter); + flow->counter = 0; + } + if (flow->meter) { + struct mlx5_flow_meter *fm; + + fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR], + flow->meter); + if (fm) + mlx5_flow_meter_detach(fm); + flow->meter = 0; + } + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; + + dev_handle = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_MLX5_FLOW], tmp_idx); + if (!dev_handle) + return; + flow->dev_handles = dev_handle->next.next; + if (dev_handle->dvh.matcher) + flow_dv_matcher_release(dev, dev_handle); + if (dev_handle->dvh.rix_sample) + flow_dv_sample_resource_release(dev, dev_handle); + if (dev_handle->dvh.rix_dest_array) + flow_dv_dest_array_resource_release(dev, dev_handle); + if (dev_handle->dvh.rix_encap_decap) + flow_dv_encap_decap_resource_release(dev, + dev_handle->dvh.rix_encap_decap); + if (dev_handle->dvh.modify_hdr) + flow_dv_modify_hdr_resource_release(dev, dev_handle); + if (dev_handle->dvh.rix_push_vlan) + flow_dv_push_vlan_action_resource_release(dev, + dev_handle); + if (dev_handle->dvh.rix_tag) + flow_dv_tag_release(dev, + dev_handle->dvh.rix_tag); + flow_dv_fate_resource_release(dev, dev_handle); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); + } +} + +/** + * Release array of hash RX queue objects. + * Helper function. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] hrxqs + * Array of hash RX queue objects. + * + * @return + * Total number of references to hash RX queue objects in *hrxqs* array + * after this operation. + */ +static int +__flow_dv_hrxqs_release(struct rte_eth_dev *dev, + uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN]) +{ + size_t i; + int remaining = 0; + + for (i = 0; i < RTE_DIM(*hrxqs); i++) { + int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]); + + if (!ret) + (*hrxqs)[i] = 0; + remaining += ret; + } + return remaining; +} + +/** + * Release all hash RX queue objects representing shared RSS action. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] action + * Shared RSS action to remove hash RX queue objects from. + * + * @return + * Total number of references to hash RX queue objects stored in *action* + * after this operation. + * Expected to be 0 if no external references held. + */ +static int +__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, + struct mlx5_shared_action_rss *action) +{ + return __flow_dv_hrxqs_release(dev, &action->hrxq) + + __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel); +} + +/** + * Setup shared RSS action. + * Prepare set of hash RX queue objects sufficient to handle all valid + * hash_fields combinations (see enum ibv_rx_hash_fields). + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] action + * Partially initialized shared RSS action. + * @param[out] error + * Perform verbose error reporting if not NULL. Initialized in case of + * error only. + * + * @return + * 0 on success, otherwise negative errno value. 
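+ *
+ * One hash RX queue object is pre-created per (hash_fields, tunnel)
+ * combination: MLX5_RSS_HASH_FIELDS_LEN hash types, each in a tunnel
+ * and a non-tunnel variant.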
+ */ +static int +__flow_dv_action_rss_setup(struct rte_eth_dev *dev, + struct mlx5_shared_action_rss *action, + struct rte_flow_error *error) +{ + size_t i; + int err; + + for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { + uint32_t hrxq_idx; + uint64_t hash_fields = mlx5_rss_hash_fields[i]; + int tunnel; + + for (tunnel = 0; tunnel < 2; tunnel++) { + hrxq_idx = mlx5_hrxq_new(dev, action->origin.key, + MLX5_RSS_HASH_KEY_LEN, + hash_fields, + action->origin.queue, + action->origin.queue_num, + tunnel, true); + if (!hrxq_idx) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot get hash queue"); + goto error_hrxq_new; + } + err = __flow_dv_action_rss_hrxq_set + (action, hash_fields, tunnel, hrxq_idx); + MLX5_ASSERT(!err); + } + } + return 0; +error_hrxq_new: + err = rte_errno; + __flow_dv_action_rss_hrxqs_release(dev, action); + rte_errno = err; + return -rte_errno; +} + +/** + * Create shared RSS action. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] conf + * Shared action configuration. + * @param[in] rss + * RSS action specification used to create shared action. + * @param[out] error + * Perform verbose error reporting if not NULL. Initialized in case of + * error only. + * + * @return + * A valid shared action handle in case of success, NULL otherwise and + * rte_errno is set. + */ +static struct rte_flow_shared_action * +__flow_dv_action_rss_create(struct rte_eth_dev *dev, + const struct rte_flow_shared_action_conf *conf, + const struct rte_flow_action_rss *rss, + struct rte_flow_error *error) +{ + struct rte_flow_shared_action *shared_action = NULL; + void *queue = NULL; + struct mlx5_shared_action_rss *shared_rss; + struct rte_flow_action_rss *origin; + const uint8_t *rss_key; + uint32_t queue_size = rss->queue_num * sizeof(uint16_t); + + RTE_SET_USED(conf); + queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), + 0, SOCKET_ID_ANY); + shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0, + SOCKET_ID_ANY); + if (!shared_action || !queue) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + goto error_rss_init; + } + shared_rss = &shared_action->rss; + shared_rss->queue = queue; + origin = &shared_rss->origin; + origin->func = rss->func; + origin->level = rss->level; + /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ + origin->types = !rss->types ? ETH_RSS_IP : rss->types; + /* NULL RSS key indicates default RSS key. */ + rss_key = !rss->key ? rss_hash_default_key : rss->key; + memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN); + origin->key = &shared_rss->key[0]; + origin->key_len = MLX5_RSS_HASH_KEY_LEN; + memcpy(shared_rss->queue, rss->queue, queue_size); + origin->queue = shared_rss->queue; + origin->queue_num = rss->queue_num; + if (__flow_dv_action_rss_setup(dev, shared_rss, error)) + goto error_rss_init; + shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS; + return shared_action; +error_rss_init: + mlx5_free(shared_action); + mlx5_free(queue); + return NULL; +} + +/** + * Destroy the shared RSS action. + * Release related hash RX queue objects. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] shared_rss + * The shared RSS action object to be removed. + * @param[out] error + * Perform verbose error reporting if not NULL. Initialized in case of + * error only. + * + * @return + * 0 on success, otherwise negative errno value. 
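+ *
+ * The release succeeds only if the reference counter drops from 1 to 0
+ * atomically, i.e. no flow rule still refers to the shared action;
+ * otherwise ETOOMANYREFS is returned.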
+ */
+static int
+__flow_dv_action_rss_release(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *shared_rss,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_shared_action *shared_action = NULL;
+ uint32_t old_refcnt = 1;
+ int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+
+ if (remaining) {
+ return rte_flow_error_set(error, ETOOMANYREFS,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss hrxq has references");
+ }
+ shared_action = container_of(shared_rss,
+ struct rte_flow_shared_action, rss);
+ if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
+ 0, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ return rte_flow_error_set(error, ETOOMANYREFS,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss has references");
+ }
+ mlx5_free(shared_rss->queue);
+ return 0;
+}
+
+/**
+ * Create shared action, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * Action specification used to create shared action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * A valid shared action handle in case of success, NULL otherwise and
+ * rte_errno is set.
+ */
+static struct rte_flow_shared_action *
+__flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_shared_action *shared_action = NULL;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ shared_action = __flow_dv_action_rss_create(dev, conf,
+ action->conf,
+ error);
+ break;
+ default:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "action type not supported");
+ break;
+ }
+ if (shared_action) {
+ __atomic_add_fetch(&shared_action->refcnt, 1,
+ __ATOMIC_RELAXED);
+ LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
+ }
+ return shared_action;
+}
+
+/**
+ * Destroy the shared action.
+ * Release action related resources on the NIC and the memory.
+ * Lock free, (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The shared action object to be removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ switch (action->type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+ ret = __flow_dv_action_rss_release(dev, &action->rss, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
+ if (ret)
+ return ret;
+ LIST_REMOVE(action, next);
+ mlx5_free(action);
+ return 0;
+}
 
 /**
- * Remove the flow from the NIC but keeps it in memory.
- * Lock free, (mutex should be acquired by caller).
+ * Updates shared RSS action configuration in place.
 *
 * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * Pointer to the Ethernet device structure.
+ * @param[in] shared_rss
+ * The shared RSS action object to be updated.
+ * @param[in] action_conf
+ * RSS action specification used to modify *shared_rss*.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ * @note: currently only update of RSS queues is supported.
 */
-static void
-__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static int
+__flow_dv_action_rss_update(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *shared_rss,
+ const struct rte_flow_action_rss *action_conf,
+ struct rte_flow_error *error)
 {
- struct mlx5_flow_handle *dh;
- uint32_t handle_idx;
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (!flow)
- return;
- handle_idx = flow->dev_handles;
- while (handle_idx) {
- dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
- handle_idx);
- if (!dh)
- return;
- if (dh->drv_flow) {
- claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
- dh->drv_flow = NULL;
+ size_t i;
+ int ret;
+ void *queue = NULL;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
+ uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+
+ queue = mlx5_malloc(MLX5_MEM_ZERO,
+ RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
+ 0, SOCKET_ID_ANY);
+ if (!queue)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ if (action_conf->key) {
+ rss_key = action_conf->key;
+ rss_key_len = action_conf->key_len;
+ } else {
+ rss_key = rss_hash_default_key;
+ rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ }
+ for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
+ uint32_t hrxq_idx;
+ uint64_t hash_fields = mlx5_rss_hash_fields[i];
+ int tunnel;
+
+ for (tunnel = 0; tunnel < 2; tunnel++) {
+ hrxq_idx = __flow_dv_action_rss_hrxq_lookup
+ (shared_rss, hash_fields, tunnel);
+ MLX5_ASSERT(hrxq_idx);
+ ret = mlx5_hrxq_modify
+ (dev, hrxq_idx,
+ rss_key, rss_key_len,
+ hash_fields,
+ action_conf->queue, action_conf->queue_num);
+ if (ret) {
+ mlx5_free(queue);
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot update hash queue");
+ }
 }
- if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
- dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
- dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
- flow_dv_fate_resource_release(dev, dh);
- if (dh->vf_vlan.tag && dh->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
- handle_idx = dh->next.next;
 }
+ mlx5_free(shared_rss->queue);
+ shared_rss->queue = queue;
+ memcpy(shared_rss->queue, action_conf->queue, queue_size);
+ shared_rss->origin.queue = shared_rss->queue;
+ shared_rss->origin.queue_num = action_conf->queue_num;
+ return 0;
 }
 
 /**
- * Remove the flow from the NIC and the memory.
- * Lock free, (mutex should be acquired by caller).
+ * Updates shared action configuration in place, lock free,
+ * (mutex should be acquired by caller).
 *
 * @param[in] dev
 * Pointer to the Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @param[in] action
+ * The shared action object to be updated.
+ * @param[in] action_conf
+ * Action specification used to modify *action*.
+ * *action_conf* should be of a type corresponding to the type of the
+ * *action*, otherwise it is considered invalid.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
*/ -static void -__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +static int +__flow_dv_action_update(struct rte_eth_dev *dev, + struct rte_flow_shared_action *action, + const void *action_conf, + struct rte_flow_error *error) { - struct mlx5_flow_handle *dev_handle; - struct mlx5_priv *priv = dev->data->dev_private; - - if (!flow) - return; - __flow_dv_remove(dev, flow); - if (flow->counter) { - flow_dv_counter_release(dev, flow->counter); - flow->counter = 0; - } - if (flow->meter) { - struct mlx5_flow_meter *fm; - - fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR], - flow->meter); - if (fm) - mlx5_flow_meter_detach(fm); - flow->meter = 0; - } - while (flow->dev_handles) { - uint32_t tmp_idx = flow->dev_handles; - - dev_handle = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_MLX5_FLOW], tmp_idx); - if (!dev_handle) - return; - flow->dev_handles = dev_handle->next.next; - if (dev_handle->dvh.matcher) - flow_dv_matcher_release(dev, dev_handle); - if (dev_handle->dvh.rix_encap_decap) - flow_dv_encap_decap_resource_release(dev, dev_handle); - if (dev_handle->dvh.modify_hdr) - flow_dv_modify_hdr_resource_release(dev_handle); - if (dev_handle->dvh.rix_push_vlan) - flow_dv_push_vlan_action_resource_release(dev, - dev_handle); - if (dev_handle->dvh.rix_tag) - flow_dv_tag_release(dev, - dev_handle->dvh.rix_tag); - flow_dv_fate_resource_release(dev, dev_handle); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], - tmp_idx); + switch (action->type) { + case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: + return __flow_dv_action_rss_update(dev, &action->rss, + action_conf, error); + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "action type not supported"); } } - /** * Query a dv flow rule for its statistics via devx. * @@ -9420,6 +11475,52 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, "counters are not available"); } +/** + * Query a flow rule AGE action for aging information. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the sub flow. + * @param[out] data + * data retrieved by the query. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow, + void *data, struct rte_flow_error *error) +{ + struct rte_flow_query_age *resp = data; + + if (flow->counter) { + struct mlx5_age_param *age_param = + flow_dv_counter_idx_get_age(dev, flow->counter); + + if (!age_param || !age_param->timeout) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot read age data"); + resp->aged = __atomic_load_n(&age_param->state, + __ATOMIC_RELAXED) == + AGE_TMOUT ? 1 : 0; + resp->sec_since_last_hit_valid = !resp->aged; + if (resp->sec_since_last_hit_valid) + resp->sec_since_last_hit = + __atomic_load_n(&age_param->sec_since_last_hit, + __ATOMIC_RELAXED); + return 0; + } + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "age data not available"); +} + /** * Query a flow. 
* @
@@ -9442,6 +11543,9 @@ flow_dv_query(struct rte_eth_dev *dev,
 case RTE_FLOW_ACTION_TYPE_COUNT:
 ret = flow_dv_query_count(dev, flow, data, error);
 break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_query_age(dev, flow, data, error);
+ break;
 default:
 return rte_flow_error_set(error, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9573,7 +11677,8 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 dtb = &mtb->ingress;
 /* Create the meter table with METER level. */
 dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ &error);
 if (!dtb->tbl) {
 DRV_LOG(ERR, "Failed to create meter policer table.");
 return -1;
@@ -9581,7 +11686,8 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 /* Create the meter suffix table with SUFFIX level. */
 dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
 MLX5_FLOW_TABLE_LEVEL_SUFFIX,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ &error);
 if (!dtb->sfx_tbl) {
 DRV_LOG(ERR, "Failed to create meter suffix table.");
 return -1;
@@ -9864,6 +11970,103 @@ error:
 return -1;
 }
 
+/**
+ * Validate the batch counter support in root table.
+ *
+ * Create a simple flow with invalid counter and drop action on root table to
+ * validate if batch counter with offset on root table is supported or not.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_match_params mask = {
+ .size = sizeof(mask.buf),
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf),
+ };
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 0,
+ .match_criteria_enable = 0,
+ .match_mask = (void *)&mask,
+ };
+ void *actions[2] = { 0 };
+ struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
+ struct mlx5_devx_obj *dcs = NULL;
+ void *matcher = NULL;
+ void *flow = NULL;
+ int i, ret = -1;
+
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, NULL);
+ if (!tbl)
+ goto err;
+ dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, NULL);
+ if (!dest_tbl)
+ goto err;
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ if (!dcs)
+ goto err;
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
+ &actions[0]);
+ if (ret)
+ goto err;
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (dest_tbl->obj, &actions[1]);
+ if (ret)
+ goto err;
+ dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &matcher);
+ if (ret)
+ goto err;
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+ actions, &flow);
+err:
+ /*
+ * If batch counter with offset is not supported, the driver will not
+ * validate the invalid offset value, so flow creation should succeed.
+ * In this case, it means batch counter is not supported in root table.
+ *
+ * Otherwise, if flow creation fails, counter offset is supported.
+ */
+ if (flow) {
+ DRV_LOG(INFO, "Batch counter is not supported in root "
+ "table. Switch to fallback mode.");
+ rte_errno = ENOTSUP;
+ ret = -rte_errno;
+ claim_zero(mlx5_flow_os_destroy_flow(flow));
+ } else {
+ /* Check the matcher to make sure validation failed at flow create.
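+ * A NULL matcher, or a failure with an errno other than EINVAL,
+ * means the failure did not come from the counter offset validation.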
 /**
  * Query a devx counter.
  *
@@ -10026,7 +12229,7 @@ flow_dv_counter_allocate(struct rte_eth_dev *dev)
 	uint32_t cnt;
 
 	flow_dv_shared_lock(dev);
-	cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
+	cnt = flow_dv_counter_alloc(dev, 0);
 	flow_dv_shared_unlock(dev);
 	return cnt;
 }
@@ -10042,6 +12245,117 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
 	flow_dv_shared_unlock(dev);
 }
 
+/**
+ * Validate a shared action.
+ * Dispatcher for action-type-specific validation.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] conf
+ *   Shared action configuration.
+ * @param[in] action
+ *   The shared action object to validate.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Initialized in case of
+ *   error only.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value.
+ */
+static int
+flow_dv_action_validate(struct rte_eth_dev *dev,
+			const struct rte_flow_shared_action_conf *conf,
+			const struct rte_flow_action *action,
+			struct rte_flow_error *error)
+{
+	RTE_SET_USED(conf);
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_RSS:
+		return mlx5_validate_action_rss(dev, action, error);
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  NULL,
+					  "action type not supported");
+	}
+}
+
+/*
+ * Mutex-protected thunk to lock-free __flow_dv_action_create().
+ */
+static struct rte_flow_shared_action *
+flow_dv_action_create(struct rte_eth_dev *dev,
+		      const struct rte_flow_shared_action_conf *conf,
+		      const struct rte_flow_action *action,
+		      struct rte_flow_error *error)
+{
+	struct rte_flow_shared_action *shared_action = NULL;
+
+	flow_dv_shared_lock(dev);
+	shared_action = __flow_dv_action_create(dev, conf, action, error);
+	flow_dv_shared_unlock(dev);
+	return shared_action;
+}
+
+/*
+ * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
+ */
+static int
+flow_dv_action_destroy(struct rte_eth_dev *dev,
+		       struct rte_flow_shared_action *action,
+		       struct rte_flow_error *error)
+{
+	int ret;
+
+	flow_dv_shared_lock(dev);
+	ret = __flow_dv_action_destroy(dev, action, error);
+	flow_dv_shared_unlock(dev);
+	return ret;
+}
+
+/*
+ * Mutex-protected thunk to lock-free __flow_dv_action_update().
+ */
+static int
+flow_dv_action_update(struct rte_eth_dev *dev,
+		      struct rte_flow_shared_action *action,
+		      const void *action_conf,
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	flow_dv_shared_lock(dev);
+	ret = __flow_dv_action_update(dev, action, action_conf, error);
+	flow_dv_shared_unlock(dev);
+	return ret;
+}
+
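These thunks back the experimental rte_flow shared-action API. A minimal
application-side sketch of the life cycle they implement, assuming a valid
port_id and two hypothetical RSS configurations prepared by the caller:

#include <rte_flow.h>

/* Sketch: create one shared RSS action, retarget it in place (every flow
 * referencing the handle is updated at once), then destroy it.
 */
static int
shared_rss_example(uint16_t port_id,
		   const struct rte_flow_action_rss *rss_conf,
		   const struct rte_flow_action_rss *new_conf)
{
	struct rte_flow_shared_action_conf conf = { .ingress = 1 };
	struct rte_flow_action act = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = rss_conf,
	};
	struct rte_flow_error err;
	struct rte_flow_shared_action *handle;

	handle = rte_flow_shared_action_create(port_id, &conf, &act, &err);
	if (handle == NULL)
		return -rte_errno;
	act.conf = new_conf;
	if (rte_flow_shared_action_update(port_id, handle, &act, &err))
		return -rte_errno;
	return rte_flow_shared_action_destroy(port_id, handle, &err);
}

The handle would normally be referenced from flow rules through an
RTE_FLOW_ACTION_TYPE_SHARED action before being updated or destroyed.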
+static int
+flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int ret = 0;
+
+	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, flags);
+		if (ret != 0)
+			return ret;
+	}
+	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
+		if (ret != 0)
+			return ret;
+	}
+	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
+		if (ret != 0)
+			return ret;
+	}
+	return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.validate = flow_dv_validate,
 	.prepare = flow_dv_prepare,
@@ -10058,6 +12372,11 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.counter_free = flow_dv_counter_free,
 	.counter_query = flow_dv_counter_query,
 	.get_aged_flows = flow_get_aged_flows,
+	.action_validate = flow_dv_action_validate,
+	.action_create = flow_dv_action_create,
+	.action_destroy = flow_dv_action_destroy,
+	.action_update = flow_dv_action_update,
+	.sync_domain = flow_dv_sync_domain,
 };
 
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
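flow_dv_sync_domain() backs the mlx5-specific rte_pmd_mlx5_sync_flow() call
declared in rte_pmd_mlx5.h. A short sketch, assuming a valid port_id and the
experimental API enabled at build time:

#include <rte_pmd_mlx5.h>

/* Sketch: make sure all previously inserted steering rules are committed
 * to hardware on the Rx and FDB domains before measurement starts.
 */
static int
flush_steering_rules(uint16_t port_id)
{
	return rte_pmd_mlx5_sync_flow(port_id,
				      MLX5_DOMAIN_BIT_NIC_RX |
				      MLX5_DOMAIN_BIT_FDB);
}

Each requested domain is synchronized independently; the first failing
dr_sync_domain() call aborts the operation and its error code is returned.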