diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index dc8d9526fb..5e230a3c25 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8,16 +8,6 @@ #include #include -/* Verbs header. */ -/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ -#ifdef PEDANTIC -#pragma GCC diagnostic ignored "-Wpedantic" -#endif -#include -#ifdef PEDANTIC -#pragma GCC diagnostic error "-Wpedantic" -#endif - #include #include #include @@ -29,16 +19,21 @@ #include #include #include +#include +#include #include #include #include +#include #include "mlx5_defs.h" #include "mlx5.h" #include "mlx5_common_os.h" #include "mlx5_flow.h" +#include "mlx5_flow_os.h" #include "mlx5_rxtx.h" +#include "rte_pmd_mlx5.h" #ifdef HAVE_IBV_FLOW_DV_SUPPORT @@ -76,11 +71,18 @@ union flow_dv_attr { }; static int -flow_dv_tbl_resource_release(struct rte_eth_dev *dev, +flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, struct mlx5_flow_tbl_resource *tbl); static int -flow_dv_default_miss_resource_release(struct rte_eth_dev *dev); +flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, + uint32_t encap_decap_idx); + +static int +flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, + uint32_t port_id); +static void +flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss); /** * Initialize flow attributes structure according to flow items' types. @@ -276,45 +278,6 @@ mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, } } -/** - * Acquire the synchronizing object to protect multithreaded access - * to shared dv context. Lock occurs only if context is actually - * shared, i.e. we have multiport IB device and representors are - * created. - * - * @param[in] dev - * Pointer to the rte_eth_dev structure. - */ -static void -flow_dv_shared_lock(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - - if (sh->dv_refcnt > 1) { - int ret; - - ret = pthread_mutex_lock(&sh->dv_mutex); - MLX5_ASSERT(!ret); - (void)ret; - } -} - -static void -flow_dv_shared_unlock(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - - if (sh->dv_refcnt > 1) { - int ret; - - ret = pthread_mutex_unlock(&sh->dv_mutex); - MLX5_ASSERT(!ret); - (void)ret; - } -} - /* Update VLAN's VID/PCP based on input rte_flow_action.
* * @param[in] action @@ -951,7 +914,7 @@ flow_dv_convert_action_modify_tcp_ack } static enum mlx5_modification_field reg_to_field[] = { - [REG_NONE] = MLX5_MODI_OUT_NONE, + [REG_NON] = MLX5_MODI_OUT_NONE, [REG_A] = MLX5_MODI_META_DATA_REG_A, [REG_B] = MLX5_MODI_META_DATA_REG_B, [REG_C_0] = MLX5_MODI_META_REG_C_0, @@ -991,7 +954,7 @@ flow_dv_convert_action_set_reg return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many items to modify"); - MLX5_ASSERT(conf->id != REG_NONE); + MLX5_ASSERT(conf->id != REG_NON); MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field)); actions[i] = (struct mlx5_modification_cmd) { .action_type = MLX5_MODIFICATION_TYPE_SET, @@ -1041,7 +1004,7 @@ flow_dv_convert_action_set_tag ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error); if (ret < 0) return ret; - MLX5_ASSERT(ret != REG_NONE); + MLX5_ASSERT(ret != REG_NON); MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field)); reg_type = reg_to_field[ret]; MLX5_ASSERT(reg_type > 0); @@ -1158,8 +1121,7 @@ flow_dv_convert_action_mark(struct rte_eth_dev *dev, .mask = &mask, }; struct field_modify_info reg_c_x[] = { - {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */ - {0, 0, 0}, + [1] = {0, 0, 0}, }; int reg; @@ -1179,7 +1141,7 @@ flow_dv_convert_action_mark(struct rte_eth_dev *dev, mask = rte_cpu_to_be_32(mask) & msk_c0; mask = rte_cpu_to_be_32(mask << shl_c0); } - reg_c_x[0].id = reg_to_field[reg]; + reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]}; return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, MLX5_MODIFICATION_TYPE_SET, error); } @@ -1425,7 +1387,7 @@ flow_dv_validate_item_mark(struct rte_eth_dev *dev, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_mark), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; return 0; @@ -1483,6 +1445,13 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, "isn't supported"); if (reg != REG_A) nic_mask.data = priv->sh->dv_meta_mask; + } else if (attr->transfer) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "extended metadata feature " + "should be enabled when " + "meta item is requested " + "with e-switch mode "); } if (!mask) mask = &rte_flow_item_meta_mask; @@ -1494,7 +1463,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_meta), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); return ret; } @@ -1547,7 +1516,7 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_tag), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; if (mask->index != 0xff) @@ -1558,7 +1527,7 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error); if (ret < 0) return ret; - MLX5_ASSERT(ret != REG_NONE); + MLX5_ASSERT(ret != REG_NON); return 0; } @@ -1618,7 +1587,7 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev, (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_port_id_mask, sizeof(struct rte_flow_item_port_id), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret) return ret; if (!spec) @@ -1668,6 +1637,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item, const struct rte_flow_item_vlan nic_mask = { .tci = 
RTE_BE16(UINT16_MAX), .inner_type = RTE_BE16(UINT16_MAX), + .has_more_vlan = 1, }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int ret; @@ -1691,7 +1661,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_vlan), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret) return ret; if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { @@ -1778,11 +1748,240 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ITEM, item, "Match is supported for GTP" " flags only"); - return mlx5_flow_item_acceptable - (item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_gtp), - error); + return mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_gtp), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); +} + +/** + * Validate IPV4 item. + * Use existing validation function mlx5_flow_validate_item_ipv4(), and + * add specific validation of the fragment_offset field. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that hold the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_ipv4(const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + struct rte_flow_error *error) +{ + int ret; + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *last = item->last; + const struct rte_flow_item_ipv4 *mask = item->mask; + rte_be16_t fragment_offset_spec = 0; + rte_be16_t fragment_offset_last = 0; + const struct rte_flow_item_ipv4 nic_ipv4_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .fragment_offset = RTE_BE16(0xffff), + .next_proto_id = 0xff, + .time_to_live = 0xff, + }, + }; + + ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item, + ether_type, &nic_ipv4_mask, + MLX5_ITEM_RANGE_ACCEPTED, error); + if (ret < 0) + return ret; + if (spec && mask) + fragment_offset_spec = spec->hdr.fragment_offset & + mask->hdr.fragment_offset; + if (!fragment_offset_spec) + return 0; + /* + * spec and mask are valid, enforce using full mask to make sure the + * complete value is used correctly. + */ + if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) + != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, "must use full mask for" + " fragment_offset"); + /* + * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0, + * indicating this is the 1st fragment of a fragmented packet. + * This is not yet supported in MLX5; return an appropriate error message. + */ + if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "match on first fragment not " + "supported"); + if (fragment_offset_spec && !last) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "specified value not supported"); + /* spec and last are valid, validate the specified range. */ + fragment_offset_last = last->hdr.fragment_offset & + mask->hdr.fragment_offset; + /* + * Match on fragment_offset spec 0x2001 and last 0x3fff + * means MF is 1 and frag-offset is > 0.
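+ * (0x2001 is RTE_IPV4_HDR_MF_FLAG | 1 and 0x3fff is MLX5_IPV4_FRAG_OFFSET_MASK, i.e. the MF bit plus the maximum 13-bit fragment offset.)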
+ * This packet is the 2nd fragment and onward, excluding the last. + * This is not yet supported in MLX5; return an appropriate + * error message. + */ + if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) && + fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on following " + "fragments not supported"); + /* + * Match on fragment_offset spec 0x0001 and last 0x1fff + * means MF is 0 and frag-offset is > 0. + * This packet is the last fragment of a fragmented packet. + * This is not yet supported in MLX5; return an appropriate + * error message. + */ + if (fragment_offset_spec == RTE_BE16(1) && + fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on last " + "fragment not supported"); + /* + * Match on fragment_offset spec 0x0001 and last 0x3fff + * means MF and/or frag-offset is not 0. + * This is a fragmented packet. + * Other range values are invalid and rejected. + */ + if (!(fragment_offset_spec == RTE_BE16(1) && + fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, last, + "specified range not supported"); + return 0; +} + +/** + * Validate IPV6 fragment extension item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that hold the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6_frag_ext *spec = item->spec; + const struct rte_flow_item_ipv6_frag_ext *last = item->last; + const struct rte_flow_item_ipv6_frag_ext *mask = item->mask; + rte_be16_t frag_data_spec = 0; + rte_be16_t frag_data_last = 0; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret = 0; + struct rte_flow_item_ipv6_frag_ext nic_mask = { + .hdr = { + .next_header = 0xff, + .frag_data = RTE_BE16(0xffff), + }, + }; + + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "ipv6 fragment extension item cannot " + "follow L4 item."); + if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || + (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "ipv6 fragment extension item must " + "follow ipv6 item"); + if (spec && mask) + frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data; + if (!frag_data_spec) + return 0; + /* + * spec and mask are valid, enforce using full mask to make sure the + * complete value is used correctly. + */ + if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) != + RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, "must use full mask for" + " frag_data"); + /* + * Match on frag_data 0x0001 means M is 1 and frag-offset is 0. + * This is the 1st fragment of a fragmented packet.
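+ * (In the IPv6 fragment header, frag_data carries the 13-bit offset in bits 15:3 and the M flag in bit 0, hence the RTE_IPV6_EHDR_MF_MASK test below.)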
+ */ + if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "match on first fragment not " + "supported"); + if (frag_data_spec && !last) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "specified value not supported"); + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv6_frag_ext), + MLX5_ITEM_RANGE_ACCEPTED, error); + if (ret) + return ret; + /* spec and last are valid, validate the specified range. */ + frag_data_last = last->hdr.frag_data & mask->hdr.frag_data; + /* + * Match on frag_data spec 0x0009 and last 0xfff9 + * means M is 1 and frag-offset is > 0. + * This packet is the 2nd fragment and onward, excluding the last. + * This is not yet supported in MLX5; return an appropriate + * error message. + */ + if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN | + RTE_IPV6_EHDR_MF_MASK) && + frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on following " + "fragments not supported"); + /* + * Match on frag_data spec 0x0008 and last 0xfff8 + * means M is 0 and frag-offset is > 0. + * This packet is the last fragment of a fragmented packet. + * This is not yet supported in MLX5; return an appropriate + * error message. + */ + if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) && + frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, + last, "match on last " + "fragment not supported"); + /* Other range values are invalid and rejected. */ + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, last, + "specified range not supported"); } /** @@ -1832,7 +2031,17 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "no support for multiple VLAN " "actions"); - if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) + /* Pop VLAN with preceding Decap requires inner header with VLAN. */ + if ((action_flags & MLX5_FLOW_ACTION_DECAP) && + !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot pop vlan after decap without " + "match on inner vlan in the flow"); + /* Pop VLAN without preceding Decap requires outer header with VLAN.
*/ + if (!(action_flags & MLX5_FLOW_ACTION_DECAP) && + !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -1941,22 +2150,11 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; const struct mlx5_priv *priv = dev->data->dev_private; - if (!attr->transfer && attr->ingress) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "push VLAN action not supported for " - "ingress"); if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "invalid vlan ethertype"); - if (action_flags & MLX5_FLOW_VLAN_ACTIONS) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, action, - "no support for multiple VLAN " - "actions"); if (action_flags & MLX5_FLOW_ACTION_PORT_ID) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, @@ -2421,6 +2619,11 @@ flow_dv_validate_action_decap(struct rte_eth_dev *dev, { const struct mlx5_priv *priv = dev->data->dev_private; + if (priv->config.hca_attr.scatter_fcs_w_decap_disable && + !priv->config.decap_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "decap is not enabled"); if (action_flags & MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -2541,6 +2744,102 @@ flow_dv_validate_action_raw_encap_decap return 0; } +/** + * Match encap_decap resource. + * + * @param list + * Pointer to the hash list. + * @param entry + * Pointer to the existing resource entry object. + * @param key + * Key of the new entry. + * @param cb_ctx + * Pointer to the new encap_decap resource. + * + * @return + * 0 on match, non-zero otherwise. + */ +int +flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key __rte_unused, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data; + struct mlx5_flow_dv_encap_decap_resource *cache_resource; + + cache_resource = container_of(entry, + struct mlx5_flow_dv_encap_decap_resource, + entry); + if (resource->entry.key == cache_resource->entry.key && + resource->reformat_type == cache_resource->reformat_type && + resource->ft_type == cache_resource->ft_type && + resource->flags == cache_resource->flags && + resource->size == cache_resource->size && + !memcmp((const void *)resource->buf, + (const void *)cache_resource->buf, + resource->size)) + return 0; + return -1; +} + +/** + * Allocate encap_decap resource. + * + * @param list + * Pointer to the hash list. + * @param key + * Key of the new entry. + * @param cb_ctx + * Pointer to the new encap_decap resource. + * + * @return + * Pointer to the created entry on success, NULL otherwise.
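+ * + * This callback is invoked by mlx5_hlist_register() only when the lookup misses, so the reformat action is created once per unique key.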
+ */ +struct mlx5_hlist_entry * +flow_dv_encap_decap_create_cb(struct mlx5_hlist *list, + uint64_t key __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5dv_dr_domain *domain; + struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data; + struct mlx5_flow_dv_encap_decap_resource *cache_resource; + uint32_t idx; + int ret; + + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + domain = sh->fdb_domain; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) + domain = sh->rx_domain; + else + domain = sh->tx_domain; + /* Register new encap/decap resource. */ + cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], + &idx); + if (!cache_resource) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + return NULL; + } + *cache_resource = *resource; + cache_resource->idx = idx; + ret = mlx5_flow_os_create_flow_action_packet_reformat + (sh->ctx, domain, cache_resource, + &cache_resource->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + return NULL; + } + + return &cache_resource->entry; +} + /** * Find existing encap/decap resource or create and register a new one. * @@ -2565,65 +2864,32 @@ flow_dv_encap_decap_resource_register { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_encap_decap_resource *cache_resource; - struct mlx5dv_dr_domain *domain; - uint32_t idx = 0; + struct mlx5_hlist_entry *entry; + union mlx5_flow_encap_decap_key encap_decap_key = { + { + .ft_type = resource->ft_type, + .refmt_type = resource->reformat_type, + .buf_size = resource->size, + .table_level = !!dev_flow->dv.group, + .cksum = 0, + } + }; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; resource->flags = dev_flow->dv.group ? 0 : 1; - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - domain = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) - domain = sh->rx_domain; - else - domain = sh->tx_domain; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx, - cache_resource, next) { - if (resource->reformat_type == cache_resource->reformat_type && - resource->ft_type == cache_resource->ft_type && - resource->flags == cache_resource->flags && - resource->size == cache_resource->size && - !memcmp((const void *)resource->buf, - (const void *)cache_resource->buf, - resource->size)) { - DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.rix_encap_decap = idx; - dev_flow->dv.encap_decap = cache_resource; - return 0; - } - } - /* Register new encap/decap resource. 
*/ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], - &dev_flow->handle->dvh.rix_encap_decap); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - cache_resource->action = - mlx5_glue->dv_create_flow_action_packet_reformat - (sh->ctx, cache_resource->reformat_type, - cache_resource->ft_type, domain, cache_resource->flags, - cache_resource->size, - (cache_resource->size ? cache_resource->buf : NULL)); - if (!cache_resource->action) { - rte_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps, - dev_flow->handle->dvh.rix_encap_decap, cache_resource, - next); - dev_flow->dv.encap_decap = cache_resource; - DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + encap_decap_key.cksum = __rte_raw_cksum(resource->buf, + resource->size, 0); + resource->entry.key = encap_decap_key.v64; + entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key, + &ctx); + if (!entry) + return -rte_errno; + resource = container_of(entry, typeof(*resource), entry); + dev_flow->dv.encap_decap = resource; + dev_flow->handle->dvh.rix_encap_decap = resource->idx; return 0; } @@ -2647,71 +2913,62 @@ flow_dv_jump_tbl_resource_register (struct rte_eth_dev *dev __rte_unused, struct mlx5_flow_tbl_resource *tbl, struct mlx5_flow *dev_flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct mlx5_flow_tbl_data_entry *tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); - int cnt; MLX5_ASSERT(tbl); - cnt = rte_atomic32_read(&tbl_data->jump.refcnt); - if (!cnt) { - tbl_data->jump.action = - mlx5_glue->dr_create_flow_action_dest_flow_tbl - (tbl->obj); - if (!tbl_data->jump.action) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create jump action"); - DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++", - (void *)&tbl_data->jump, cnt); - } else { - /* old jump should not make the table ref++. */ - flow_dv_tbl_resource_release(dev, &tbl_data->tbl); - MLX5_ASSERT(tbl_data->jump.action); - DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++", - (void *)&tbl_data->jump, cnt); - } - rte_atomic32_inc(&tbl_data->jump.refcnt); + MLX5_ASSERT(tbl_data->jump.action); dev_flow->handle->rix_jump = tbl_data->idx; dev_flow->dv.jump = &tbl_data->jump; return 0; } -/** - * Find existing default miss resource or create and register a new one. - * - * @param[in, out] dev - * Pointer to rte_eth_dev structure. - * @param[out] error - * pointer to error structure. - * - * @return - * 0 on success otherwise -errno and errno is set. 
- */ -static int -flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, - struct rte_flow_error *error) +int +flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_default_miss_resource *cache_resource = - &sh->default_miss; - int cnt = rte_atomic32_read(&cache_resource->refcnt); - - if (!cnt) { - MLX5_ASSERT(cache_resource->action); - cache_resource->action = - mlx5_glue->dr_create_flow_action_default_miss(); - if (!cache_resource->action) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot create default miss action"); - DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++", - (void *)cache_resource->action, cnt); - } - rte_atomic32_inc(&cache_resource->refcnt); - return 0; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; + struct mlx5_flow_dv_port_id_action_resource *res = + container_of(entry, typeof(*res), entry); + + return ref->port_id != res->port_id; +} + +struct mlx5_cache_entry * +flow_dv_port_id_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; + struct mlx5_flow_dv_port_id_action_resource *cache; + uint32_t idx; + int ret; + + /* Register new port id action resource. */ + cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate port_id action cache memory"); + return NULL; + } + *cache = *ref; + ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain, + ref->port_id, + &cache->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create action"); + return NULL; + } + return &cache->entry; } /** @@ -2737,58 +2994,74 @@ flow_dv_port_id_action_resource_register struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_port_id_action_resource *cache_resource; - uint32_t idx = 0; + struct mlx5_cache_entry *entry; + struct mlx5_flow_dv_port_id_action_resource *cache; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list, - idx, cache_resource, next) { - if (resource->port_id == cache_resource->port_id) { - DRV_LOG(DEBUG, "port id action resource resource %p: " - "refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->rix_port_id_action = idx; - dev_flow->dv.port_id_action = cache_resource; - return 0; - } - } - /* Register new port id action resource. 
*/ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], - &dev_flow->handle->rix_port_id_action); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - /* - * Depending on rdma_core version the glue routine calls - * either mlx5dv_dr_action_create_dest_ib_port(domain, dev_port) - * or mlx5dv_dr_action_create_dest_vport(domain, vport_id). - */ - cache_resource->action = - mlx5_glue->dr_create_flow_action_dest_port - (priv->sh->fdb_domain, resource->port_id); - if (!cache_resource->action) { - rte_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list, - dev_flow->handle->rix_port_id_action, cache_resource, - next); - dev_flow->dv.port_id_action = cache_resource; - DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx); + if (!entry) + return -rte_errno; + cache = container_of(entry, typeof(*cache), entry); + dev_flow->dv.port_id_action = cache; + dev_flow->handle->rix_port_id_action = cache->idx; return 0; } +int +flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; + struct mlx5_flow_dv_push_vlan_action_resource *res = + container_of(entry, typeof(*res), entry); + + return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type; +} + +struct mlx5_cache_entry * +flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; + struct mlx5_flow_dv_push_vlan_action_resource *cache; + struct mlx5dv_dr_domain *domain; + uint32_t idx; + int ret; + + /* Register new push VLAN action resource. */ + cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate push_vlan action cache memory"); + return NULL; + } + *cache = *ref; + if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + domain = sh->fdb_domain; + else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) + domain = sh->rx_domain; + else + domain = sh->tx_domain; + ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag, + &cache->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create push vlan action"); + return NULL; + } + return &cache->entry; +} + /** * Find existing push vlan resource or create and register a new one.
* @@ -2812,63 +3085,25 @@ flow_dv_push_vlan_action_resource_register struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; - struct mlx5dv_dr_domain *domain; - uint32_t idx = 0; + struct mlx5_flow_dv_push_vlan_action_resource *cache; + struct mlx5_cache_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN], - sh->push_vlan_action_list, idx, cache_resource, next) { - if (resource->vlan_tag == cache_resource->vlan_tag && - resource->ft_type == cache_resource->ft_type) { - DRV_LOG(DEBUG, "push-VLAN action resource resource %p: " - "refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.rix_push_vlan = idx; - dev_flow->dv.push_vlan_res = cache_resource; - return 0; - } - } - /* Register new push_vlan action resource. */ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], - &dev_flow->handle->dvh.rix_push_vlan); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - domain = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) - domain = sh->rx_domain; - else - domain = sh->tx_domain; - cache_resource->action = - mlx5_glue->dr_create_flow_action_push_vlan(domain, - resource->vlan_tag); - if (!cache_resource->action) { - rte_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN], - &sh->push_vlan_action_list, - dev_flow->handle->dvh.rix_push_vlan, - cache_resource, next); - dev_flow->dv.push_vlan_res = cache_resource; - DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx); + if (!entry) + return -rte_errno; + cache = container_of(entry, typeof(*cache), entry); + + dev_flow->handle->dvh.rix_push_vlan = cache->idx; + dev_flow->dv.push_vlan_res = cache; return 0; } + /** - * Get the size of specific rte_flow_item_type + * Get the size of specific rte_flow_item_type hdr size * * @param[in] item_type * Tested rte_flow_item_type. @@ -2877,43 +3112,39 @@ flow_dv_push_vlan_action_resource_register * sizeof struct item_type, 0 if void or irrelevant. 
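 * (The values returned below are wire header sizes, e.g. sizeof(struct rte_ether_hdr) for ETH, which may be smaller than the rte_flow item structures used previously.)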
*/ static size_t -flow_dv_get_item_len(const enum rte_flow_item_type item_type) +flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type) { size_t retval; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - retval = sizeof(struct rte_flow_item_eth); + retval = sizeof(struct rte_ether_hdr); break; case RTE_FLOW_ITEM_TYPE_VLAN: - retval = sizeof(struct rte_flow_item_vlan); + retval = sizeof(struct rte_vlan_hdr); break; case RTE_FLOW_ITEM_TYPE_IPV4: - retval = sizeof(struct rte_flow_item_ipv4); + retval = sizeof(struct rte_ipv4_hdr); break; case RTE_FLOW_ITEM_TYPE_IPV6: - retval = sizeof(struct rte_flow_item_ipv6); + retval = sizeof(struct rte_ipv6_hdr); break; case RTE_FLOW_ITEM_TYPE_UDP: - retval = sizeof(struct rte_flow_item_udp); + retval = sizeof(struct rte_udp_hdr); break; case RTE_FLOW_ITEM_TYPE_TCP: - retval = sizeof(struct rte_flow_item_tcp); + retval = sizeof(struct rte_tcp_hdr); break; case RTE_FLOW_ITEM_TYPE_VXLAN: - retval = sizeof(struct rte_flow_item_vxlan); + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + retval = sizeof(struct rte_vxlan_hdr); break; case RTE_FLOW_ITEM_TYPE_GRE: - retval = sizeof(struct rte_flow_item_gre); - break; case RTE_FLOW_ITEM_TYPE_NVGRE: - retval = sizeof(struct rte_flow_item_nvgre); - break; - case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: - retval = sizeof(struct rte_flow_item_vxlan_gpe); + retval = sizeof(struct rte_gre_hdr); break; case RTE_FLOW_ITEM_TYPE_MPLS: - retval = sizeof(struct rte_flow_item_mpls); + retval = sizeof(struct rte_mpls_hdr); break; case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */ default: @@ -2966,7 +3197,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "invalid empty data"); for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - len = flow_dv_get_item_len(items->type); + len = flow_dv_get_item_hdr_len(items->type); if (len + temp_size > MLX5_ENCAP_MAX_LEN) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -3326,6 +3557,8 @@ flow_dv_create_action_push_vlan(struct rte_eth_dev *dev, (dev, &res, dev_flow, error); } +static int fdb_mirror; + /** * Validate the modify-header actions. * @@ -3353,6 +3586,12 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have encap action before" " modify action"); + if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't support sample action before" + " modify action for E-Switch" + " mirroring"); return 0; } @@ -3658,14 +3897,21 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags, * 0 on success, a negative errno value otherwise and rte_errno is set. 
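 * (A flow carrying tunnel set/match actions may jump to its own group number; the self-jump check below is skipped in that case.)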
*/ static int -flow_dv_validate_action_jump(const struct rte_flow_action *action, +flow_dv_validate_action_jump(struct rte_eth_dev *dev, + const struct mlx5_flow_tunnel *tunnel, + const struct rte_flow_action *action, uint64_t action_flags, const struct rte_flow_attr *attributes, bool external, struct rte_flow_error *error) { uint32_t target_group, table; int ret = 0; - + struct flow_grp_info grp_info = { + .external = !!external, + .transfer = !!attributes->transfer, + .fdb_def_rule = 1, + .std_tbl_fix = 0 + }; if (action_flags & (MLX5_FLOW_FATE_ACTIONS | MLX5_FLOW_FATE_ESWITCH_ACTIONS)) return rte_flow_error_set(error, EINVAL, @@ -3676,17 +3922,25 @@ flow_dv_validate_action_jump(const struct rte_flow_action *action, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "jump with meter not support"); + if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "E-Switch mirroring can't support" + " Sample action and jump action in" + " same flow now"); if (!action->conf) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "action configuration not set"); target_group = ((const struct rte_flow_action_jump *)action->conf)->group; - ret = mlx5_flow_group_to_table(attributes, external, target_group, - true, &table, error); + ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table, + &grp_info, error); if (ret) return ret; - if (attributes->group == target_group) + if (attributes->group == target_group && + !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | + MLX5_FLOW_ACTION_TUNNEL_MATCH))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "target group must be other than" @@ -3875,7 +4129,8 @@ flow_dv_validate_action_age(uint64_t action_flags, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_age *age = action->conf; - if (!priv->config.devx || priv->counter_fallback) + if (!priv->config.devx || (priv->sh->cmng.counter_fallback && + !priv->sh->aso_age_mng)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -3884,14 +4139,14 @@ flow_dv_validate_action_age(uint64_t action_flags, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "configuration cannot be null"); - if (age->timeout >= UINT16_MAX / 2 / 10) - return rte_flow_error_set(error, ENOTSUP, + if (!(age->timeout)) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, - "Max age time: 3275 seconds"); + "invalid timeout value 0"); if (action_flags & MLX5_FLOW_ACTION_AGE) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "Duplicate age ctions set"); + "duplicate age actions set"); return 0; } @@ -3963,6 +4218,264 @@ flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags, return ret; } +/** + * Match modify-header resource. + * + * @param list + * Pointer to the hash list. + * @param entry + * Pointer to exist resource entry object. + * @param key + * Key of the new entry. + * @param ctx + * Pointer to new modify-header resource. + * + * @return + * 0 on matching, non-zero otherwise. 
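+ * + * The compared region spans from ft_type through the variable-length actions array, the same bytes that flow_dv_modify_hdr_resource_register() feeds to __rte_raw_cksum() when building the hash key.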
+ */ +int +flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key __rte_unused, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data; + struct mlx5_flow_dv_modify_hdr_resource *resource = + container_of(entry, typeof(*resource), entry); + uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type); + + key_len += ref->actions_num * sizeof(ref->actions[0]); + return ref->actions_num != resource->actions_num || + memcmp(&ref->ft_type, &resource->ft_type, key_len); +} + +struct mlx5_hlist_entry * +flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5dv_dr_domain *ns; + struct mlx5_flow_dv_modify_hdr_resource *entry; + struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data; + int ret; + uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]); + uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type); + + entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0, + SOCKET_ID_ANY); + if (!entry) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + return NULL; + } + rte_memcpy(&entry->ft_type, + RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), + key_len + data_len); + if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + ns = sh->fdb_domain; + else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) + ns = sh->tx_domain; + else + ns = sh->rx_domain; + ret = mlx5_flow_os_create_flow_action_modify_header + (sh->ctx, ns, entry, + data_len, &entry->action); + if (ret) { + mlx5_free(entry); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create modification action"); + return NULL; + } + return &entry->entry; +} + +/** + * Validate the sample action. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the sample action. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
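+ * + * Accepted sub-actions are QUEUE, MARK, COUNT, PORT_ID and RAW_ENCAP; E-Switch mirroring additionally requires ratio 1 and a PORT_ID destination.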
+ */ +static int +flow_dv_validate_action_sample(uint64_t action_flags, + const struct rte_flow_action *action, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *dev_conf = &priv->config; + const struct rte_flow_action_sample *sample = action->conf; + const struct rte_flow_action *act; + uint64_t sub_action_flags = 0; + uint16_t queue_index = 0xFFFF; + int actions_n = 0; + int ret; + fdb_mirror = 0; + + if (!sample) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "configuration cannot be NULL"); + if (sample->ratio == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "ratio value starts from 1"); + if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "sample action not supported"); + if (action_flags & MLX5_FLOW_ACTION_SAMPLE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Multiple sample actions not " + "supported"); + if (action_flags & MLX5_FLOW_ACTION_METER) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, meter should " + "be after sample action"); + if (action_flags & MLX5_FLOW_ACTION_JUMP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, jump should " + "be after sample action"); + act = sample->actions; + for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) { + if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "too many actions"); + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_validate_action_queue(act, + sub_action_flags, + dev, + attr, error); + if (ret < 0) + return ret; + queue_index = ((const struct rte_flow_action_queue *) + (act->conf))->index; + sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = flow_dv_validate_action_mark(dev, act, + sub_action_flags, + attr, error); + if (ret < 0) + return ret; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) + sub_action_flags |= MLX5_FLOW_ACTION_MARK | + MLX5_FLOW_ACTION_MARK_EXT; + else + sub_action_flags |= MLX5_FLOW_ACTION_MARK; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_dv_validate_action_count(dev, error); + if (ret < 0) + return ret; + sub_action_flags |= MLX5_FLOW_ACTION_COUNT; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + ret = flow_dv_validate_action_port_id(dev, + sub_action_flags, + act, + attr, + error); + if (ret) + return ret; + sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + ret = flow_dv_validate_action_raw_encap_decap + (dev, NULL, act->conf, attr, &sub_action_flags, + &actions_n, error); + if (ret < 0) + return ret; + ++actions_n; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Doesn't support optional " + "action"); + } + } + if (attr->ingress && !attr->transfer) { + if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Ingress must have a dest " + "QUEUE for Sample"); + } else if (attr->egress && !attr->transfer) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Sample only supports Ingress " + "or E-Switch"); + } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) { + MLX5_ASSERT(attr->transfer); + if (sample->ratio > 1) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "E-Switch doesn't support " + "any optional action " + "for sampling"); + fdb_mirror = 1; + if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action QUEUE"); + if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "E-Switch must have a dest " + "port for mirroring"); + } + /* Continue validation for Xcap actions. */ + if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && + (queue_index == 0xFFFF || + mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == + MLX5_FLOW_XCAP_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap and decap " + "combination isn't " + "supported"); + if (!attr->transfer && attr->ingress && (sub_action_flags & + MLX5_FLOW_ACTION_ENCAP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap is not supported" + " for ingress traffic"); + } + return 0; +} + /** * Find existing modify-header resource or create and register a new one. * @@ -3987,9 +4500,14 @@ flow_dv_modify_hdr_resource_register { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_modify_hdr_resource *cache_resource; - struct mlx5dv_dr_domain *ns; - uint32_t actions_len; + uint32_t key_len = sizeof(*resource) - + offsetof(typeof(*resource), ft_type) + + resource->actions_num * sizeof(resource->actions[0]); + struct mlx5_hlist_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; resource->flags = dev_flow->dv.group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, resource->flags)) return rte_flow_error_set(error, EOVERFLOW, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many modify header items"); - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - ns = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) - ns = sh->tx_domain; - else - ns = sh->rx_domain; - /* Lookup a matching resource from cache. */ - actions_len = resource->actions_num * sizeof(resource->actions[0]); - LIST_FOREACH(cache_resource, &sh->modify_cmds, next) { - if (resource->ft_type == cache_resource->ft_type && - resource->actions_num == cache_resource->actions_num && - resource->flags == cache_resource->flags && - !memcmp((const void *)resource->actions, - (const void *)cache_resource->actions, - actions_len)) { - DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.modify_hdr = cache_resource; - return 0; - } - } - /* Register new modify-header resource.
*/ - cache_resource = rte_calloc(__func__, 1, - sizeof(*cache_resource) + actions_len, 0); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - rte_memcpy(cache_resource->actions, resource->actions, actions_len); - cache_resource->action = - mlx5_glue->dv_create_flow_action_modify_header - (sh->ctx, cache_resource->ft_type, ns, - cache_resource->flags, actions_len, - (uint64_t *)cache_resource->actions); - if (!cache_resource->action) { - rte_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); - dev_flow->handle->dvh.modify_hdr = cache_resource; - DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0); + entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx); + if (!entry) + return -rte_errno; + resource = container_of(entry, typeof(*resource), entry); + dev_flow->handle->dvh.modify_hdr = resource; return 0; } @@ -4070,20 +4544,13 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, struct mlx5_flow_counter_pool **ppool) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_pools_container *cont; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; struct mlx5_flow_counter_pool *pool; - uint32_t batch = 0, age = 0; - - idx--; - age = MLX_CNT_IS_AGE(idx); - idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx; - if (idx >= MLX5_CNT_BATCH_OFFSET) { - idx -= MLX5_CNT_BATCH_OFFSET; - batch = 1; - } - cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); - MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n); - pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL]; + + /* Decrease to original index and clear shared bit. */ + idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); + MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n); + pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; MLX5_ASSERT(pool); if (ppool) *ppool = pool; @@ -4115,8 +4582,8 @@ flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) /** * Get a pool by devx counter ID. * - * @param[in] cont - * Pointer to the counter container. + * @param[in] cmng + * Pointer to the counter management. * @param[in] id * The counter devx ID. * @@ -4124,161 +4591,70 @@ flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) * The counter pool pointer if exists, NULL otherwise, */ static struct mlx5_flow_counter_pool * -flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id) +flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id) { uint32_t i; + struct mlx5_flow_counter_pool *pool = NULL; + rte_spinlock_lock(&cmng->pool_update_sl); /* Check last used pool. */ - if (cont->last_pool_idx != POOL_IDX_INVALID && - flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id)) - return cont->pools[cont->last_pool_idx]; + if (cmng->last_pool_idx != POOL_IDX_INVALID && + flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) { + pool = cmng->pools[cmng->last_pool_idx]; + goto out; + } /* ID out of range means no suitable pool in the container. 
*/ - if (id > cont->max_id || id < cont->min_id) - return NULL; + if (id > cmng->max_id || id < cmng->min_id) + goto out; /* * Find the pool from the end of the container, since mostly counter * ID is sequence increasing, and the last pool should be the needed * one. */ - i = rte_atomic16_read(&cont->n_valid); + i = cmng->n_valid; while (i--) { - struct mlx5_flow_counter_pool *pool = cont->pools[i]; + struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i]; - if (flow_dv_is_counter_in_pool(pool, id)) - return pool; + if (flow_dv_is_counter_in_pool(pool_tmp, id)) { + pool = pool_tmp; + break; + } } - return NULL; +out: + rte_spinlock_unlock(&cmng->pool_update_sl); + return pool; } /** - * Allocate a new memory for the counter values wrapped by all the needed - * management. + * Resize a counter container. * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] raws_n - * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters. * * @return - * The new memory management pointer on success, otherwise NULL and rte_errno - * is set. + * 0 on success, otherwise negative errno value and rte_errno is set. */ -static struct mlx5_counter_stats_mem_mng * -flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) +static int +flow_dv_container_resize(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_devx_mkey_attr mkey_attr; - struct mlx5_counter_stats_mem_mng *mem_mng; - volatile struct flow_counter_stats *raw_data; - int size = (sizeof(struct flow_counter_stats) * - MLX5_COUNTERS_PER_POOL + - sizeof(struct mlx5_counter_stats_raw)) * raws_n + - sizeof(struct mlx5_counter_stats_mem_mng); - uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE)); - int i; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + void *old_pools = cmng->pools; + uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE; + uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; + void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); - if (!mem) { + if (!pools) { rte_errno = ENOMEM; - return NULL; - } - mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; - size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, - IBV_ACCESS_LOCAL_WRITE); - if (!mem_mng->umem) { - rte_errno = errno; - rte_free(mem); - return NULL; - } - mkey_attr.addr = (uintptr_t)mem; - mkey_attr.size = size; - mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); - mkey_attr.pd = sh->pdn; - mkey_attr.log_entity_size = 0; - mkey_attr.pg_access = 0; - mkey_attr.klm_array = NULL; - mkey_attr.klm_num = 0; - if (priv->config.hca_attr.relaxed_ordering_write && - priv->config.hca_attr.relaxed_ordering_read && - !haswell_broadwell_cpu) - mkey_attr.relaxed_ordering = 1; - mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); - if (!mem_mng->dm) { - mlx5_glue->devx_umem_dereg(mem_mng->umem); - rte_errno = errno; - rte_free(mem); - return NULL; - } - mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); - raw_data = (volatile struct flow_counter_stats *)mem; - for (i = 0; i < raws_n; ++i) { - mem_mng->raws[i].mem_mng = mem_mng; - mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; - } - LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); - return mem_mng; -} - -/** - * Resize a counter container. - * - * @param[in] dev - * Pointer to the Ethernet device structure. 
- * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. - * @param[in] age - * Whether the pool is for Aging counter. - * - * @return - * 0 on success, otherwise negative errno value and rte_errno is set. - */ -static int -flow_dv_container_resize(struct rte_eth_dev *dev, - uint32_t batch, uint32_t age) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, - age); - struct mlx5_counter_stats_mem_mng *mem_mng = NULL; - void *old_pools = cont->pools; - uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE; - uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; - void *pools = rte_calloc(__func__, 1, mem_size, 0); - - if (!pools) { - rte_errno = ENOMEM; - return -ENOMEM; + return -ENOMEM; } if (old_pools) - memcpy(pools, old_pools, cont->n * + memcpy(pools, old_pools, cmng->n * sizeof(struct mlx5_flow_counter_pool *)); - /* - * Fallback mode query the counter directly, no background query - * resources are needed. - */ - if (!priv->counter_fallback) { - int i; - - mem_mng = flow_dv_create_counter_stat_mem_mng(dev, - MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); - if (!mem_mng) { - rte_free(pools); - return -ENOMEM; - } - for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) - LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws, - mem_mng->raws + - MLX5_CNT_CONTAINER_RESIZE + - i, next); - } - rte_spinlock_lock(&cont->resize_sl); - cont->n = resize; - cont->mem_mng = mem_mng; - cont->pools = pools; - rte_spinlock_unlock(&cont->resize_sl); + cmng->n = resize; + cmng->pools = pools; if (old_pools) - rte_free(old_pools); + mlx5_free(old_pools); return 0; } @@ -4304,25 +4680,15 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; struct mlx5_flow_counter *cnt; - struct mlx5_flow_counter_ext *cnt_ext = NULL; int offset; cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); MLX5_ASSERT(pool); - if (counter < MLX5_CNT_BATCH_OFFSET) { - cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); - if (priv->counter_fallback) - return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0, + if (priv->sh->cmng.counter_fallback) + return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0, 0, pkts, bytes, 0, NULL, NULL, 0); - } - rte_spinlock_lock(&pool->sl); - /* - * The single counters allocation may allocate smaller ID than the - * current allocated in parallel to the host reading. - * In this case the new counter values must be reported as 0. - */ - if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) { + if (!pool->raw) { *pkts = 0; *bytes = 0; } else { @@ -4341,8 +4707,6 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, * Pointer to the Ethernet device structure. * @param[out] dcs * The devX counter handle. - * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. * @param[in] age * Whether the pool is for counter that was allocated for aging. 
* @param[in/out] cont_cur @@ -4353,88 +4717,52 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, */ static struct mlx5_flow_counter_pool * flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, - uint32_t batch, uint32_t age) + uint32_t age) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool; - struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, - age); - int16_t n_valid = rte_atomic16_read(&cont->n_valid); + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + bool fallback = priv->sh->cmng.counter_fallback; uint32_t size = sizeof(*pool); - if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age)) - return NULL; - size += MLX5_COUNTERS_PER_POOL * CNT_SIZE; - size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE); - size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE); - pool = rte_calloc(__func__, 1, size, 0); + size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE; + size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE); + pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); if (!pool) { rte_errno = ENOMEM; return NULL; } - pool->min_dcs = dcs; - if (!priv->counter_fallback) - pool->raw = cont->mem_mng->raws + n_valid % - MLX5_CNT_CONTAINER_RESIZE; - pool->raw_hw = NULL; - pool->type = 0; - pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT); - pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE); + pool->raw = NULL; + pool->is_aged = !!age; pool->query_gen = 0; + pool->min_dcs = dcs; rte_spinlock_init(&pool->sl); + rte_spinlock_init(&pool->csl); TAILQ_INIT(&pool->counters[0]); TAILQ_INIT(&pool->counters[1]); - TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); - pool->index = n_valid; - cont->pools[n_valid] = pool; - if (!batch) { + pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; + rte_spinlock_lock(&cmng->pool_update_sl); + pool->index = cmng->n_valid; + if (pool->index == cmng->n && flow_dv_container_resize(dev)) { + mlx5_free(pool); + rte_spinlock_unlock(&cmng->pool_update_sl); + return NULL; + } + cmng->pools[pool->index] = pool; + cmng->n_valid++; + if (unlikely(fallback)) { int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL); - if (base < cont->min_id) - cont->min_id = base; - if (base > cont->max_id) - cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1; - cont->last_pool_idx = pool->index; + if (base < cmng->min_id) + cmng->min_id = base; + if (base > cmng->max_id) + cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1; + cmng->last_pool_idx = pool->index; } - /* Pool initialization must be updated before host thread access. */ - rte_cio_wmb(); - rte_atomic16_add(&cont->n_valid, 1); + rte_spinlock_unlock(&cmng->pool_update_sl); return pool; } -/** - * Update the minimum dcs-id for aged or no-aged counter pool. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] pool - * Current counter pool. - * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. - * @param[in] age - * Whether the counter is for aging. 
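In fallback mode the pool registration above also widens the [min_id, max_id] bracket so the ID-based lookup can cheaply reject counters that belong to no pool. The arithmetic in isolation (illustrative names; ALIGN_FLOOR mirrors RTE_ALIGN_FLOOR):

#include <stdint.h>

#define COUNTERS_PER_POOL 512
#define ALIGN_FLOOR(v, a) ((v) / (a) * (a))

struct id_bracket {
	uint32_t min_id;
	uint32_t max_id;
};

static uint32_t
register_pool_base(struct id_bracket *b, uint32_t dcs_id)
{
	uint32_t base = ALIGN_FLOOR(dcs_id, COUNTERS_PER_POOL);

	if (base < b->min_id)
		b->min_id = base;
	if (base + COUNTERS_PER_POOL - 1 > b->max_id)
		b->max_id = base + COUNTERS_PER_POOL - 1;
	return base;
}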
- */ -static void -flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev, - struct mlx5_flow_counter_pool *pool, - uint32_t batch, uint32_t age) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_counter_pool *other; - struct mlx5_pools_container *cont; - - cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1)); - other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id); - if (!other) - return; - if (pool->min_dcs->id < other->min_dcs->id) { - rte_atomic64_set(&other->a64_dcs, - rte_atomic64_read(&pool->a64_dcs)); - } else { - rte_atomic64_set(&pool->a64_dcs, - rte_atomic64_read(&other->a64_dcs)); - } -} /** * Prepare a new counter and/or a new counter pool. * @@ -4442,8 +4770,6 @@ flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev, * Pointer to the Ethernet device structure. * @param[out] cnt_free * Where to put the pointer of a new counter. - * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. * @param[in] age * Whether the pool is for counter that was allocated for aging. * @@ -4454,50 +4780,45 @@ flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev, static struct mlx5_flow_counter_pool * flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, struct mlx5_flow_counter **cnt_free, - uint32_t batch, uint32_t age) + uint32_t age) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_pools_container *cont; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; struct mlx5_flow_counter_pool *pool; struct mlx5_counters tmp_tq; struct mlx5_devx_obj *dcs = NULL; struct mlx5_flow_counter *cnt; + enum mlx5_counter_type cnt_type = + age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN; + bool fallback = priv->sh->cmng.counter_fallback; uint32_t i; - cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); - if (!batch) { + if (fallback) { /* bulk_bitmap must be 0 for single counter allocation. */ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); if (!dcs) return NULL; - pool = flow_dv_find_pool_by_id(cont, dcs->id); + pool = flow_dv_find_pool_by_id(cmng, dcs->id); if (!pool) { - pool = flow_dv_pool_create(dev, dcs, batch, age); + pool = flow_dv_pool_create(dev, dcs, age); if (!pool) { mlx5_devx_cmd_destroy(dcs); return NULL; } - } else if (dcs->id < pool->min_dcs->id) { - rte_atomic64_set(&pool->a64_dcs, - (int64_t)(uintptr_t)dcs); } - flow_dv_counter_update_min_dcs(dev, - pool, batch, age); i = dcs->id % MLX5_COUNTERS_PER_POOL; cnt = MLX5_POOL_GET_CNT(pool, i); cnt->pool = pool; - MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs; + cnt->dcs_when_free = dcs; *cnt_free = cnt; return pool; } - /* bulk_bitmap is in 128 counters units. 
*/ - if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) - dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); + dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); if (!dcs) { rte_errno = ENODATA; return NULL; } - pool = flow_dv_pool_create(dev, dcs, batch, age); + pool = flow_dv_pool_create(dev, dcs, age); if (!pool) { mlx5_devx_cmd_destroy(dcs); return NULL; @@ -4508,57 +4829,19 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, cnt->pool = pool; TAILQ_INSERT_HEAD(&tmp_tq, cnt, next); } - rte_spinlock_lock(&cont->csl); - TAILQ_CONCAT(&cont->counters, &tmp_tq, next); - rte_spinlock_unlock(&cont->csl); + rte_spinlock_lock(&cmng->csl[cnt_type]); + TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next); + rte_spinlock_unlock(&cmng->csl[cnt_type]); *cnt_free = MLX5_POOL_GET_CNT(pool, 0); (*cnt_free)->pool = pool; return pool; } -/** - * Search for existed shared counter. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] id - * The shared counter ID to search. - * @param[out] ppool - * mlx5 flow counter pool in the container, - * - * @return - * NULL if not existed, otherwise pointer to the shared extend counter. - */ -static struct mlx5_flow_counter_ext * -flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id, - struct mlx5_flow_counter_pool **ppool) -{ - struct mlx5_priv *priv = dev->data->dev_private; - union mlx5_l3t_data data; - uint32_t cnt_idx; - - if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword) - return NULL; - cnt_idx = data.dword; - /* - * Shared counters don't have age info. The counter extend is after - * the counter datat structure. - */ - return (struct mlx5_flow_counter_ext *) - ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1); -} - /** * Allocate a flow counter. * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] shared - * Indicate if this counter is shared with other flows. - * @param[in] id - * Counter identifier. - * @param[in] group - * Counter flow group. * @param[in] age * Whether the counter was allocated for aging. * @@ -4566,109 +4849,128 @@ flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id, * Index to flow counter on success, 0 otherwise and rte_errno is set. */ static uint32_t -flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, - uint16_t group, uint32_t age) +flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; struct mlx5_flow_counter *cnt_free = NULL; - struct mlx5_flow_counter_ext *cnt_ext = NULL; - /* - * Currently group 0 flow counter cannot be assigned to a flow if it is - * not the first one in the batch counter allocation, so it is better - * to allocate counters one by one for these flows in a separate - * container. - * A counter can be shared between different groups so need to take - * shared counters from the single container. - */ - uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0; - struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, - age); + bool fallback = priv->sh->cmng.counter_fallback; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + enum mlx5_counter_type cnt_type = + age ? 
MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN; uint32_t cnt_idx; if (!priv->config.devx) { rte_errno = ENOTSUP; return 0; } - if (shared) { - cnt_ext = flow_dv_counter_shared_search(dev, id, &pool); - if (cnt_ext) { - if (cnt_ext->ref_cnt + 1 == 0) { - rte_errno = E2BIG; - return 0; - } - cnt_ext->ref_cnt++; - cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL + - (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL) - + 1; - return cnt_idx; - } - } /* Get free counters from container. */ - rte_spinlock_lock(&cont->csl); - cnt_free = TAILQ_FIRST(&cont->counters); + rte_spinlock_lock(&cmng->csl[cnt_type]); + cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]); if (cnt_free) - TAILQ_REMOVE(&cont->counters, cnt_free, next); - rte_spinlock_unlock(&cont->csl); - if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, - batch, age)) + TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next); + rte_spinlock_unlock(&cmng->csl[cnt_type]); + if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age)) goto err; pool = cnt_free->pool; - if (!batch) - cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free); + if (fallback) + cnt_free->dcs_when_active = cnt_free->dcs_when_free; /* Create a DV counter action only in the first time usage. */ if (!cnt_free->action) { uint16_t offset; struct mlx5_devx_obj *dcs; + int ret; - if (batch) { + if (!fallback) { offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free); dcs = pool->min_dcs; } else { offset = 0; - dcs = cnt_ext->dcs; + dcs = cnt_free->dcs_when_free; } - cnt_free->action = mlx5_glue->dv_create_flow_action_counter - (dcs->obj, offset); - if (!cnt_free->action) { + ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset, + &cnt_free->action); + if (ret) { rte_errno = errno; goto err; } } cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, MLX5_CNT_ARRAY_IDX(pool, cnt_free)); - cnt_idx += batch * MLX5_CNT_BATCH_OFFSET; - cnt_idx += age * MLX5_CNT_AGE_OFFSET; /* Update the counter reset values. */ if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits, &cnt_free->bytes)) goto err; - if (cnt_ext) { - cnt_ext->shared = shared; - cnt_ext->ref_cnt = 1; - cnt_ext->id = id; - if (shared) { - union mlx5_l3t_data data; - - data.dword = cnt_idx; - if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data)) - return 0; - } - } - if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on) + if (!fallback && !priv->sh->cmng.query_thread_on) /* Start the asynchronous batch query by the host thread. */ mlx5_set_query_alarm(priv->sh); return cnt_idx; err: if (cnt_free) { cnt_free->pool = pool; - rte_spinlock_lock(&cont->csl); - TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next); - rte_spinlock_unlock(&cont->csl); + if (fallback) + cnt_free->dcs_when_free = cnt_free->dcs_when_active; + rte_spinlock_lock(&cmng->csl[cnt_type]); + TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next); + rte_spinlock_unlock(&cmng->csl[cnt_type]); } return 0; } +/** + * Allocate a shared flow counter. + * + * @param[in] ctx + * Pointer to the shared counter configuration. + * @param[in] data + * Pointer to save the allocated counter index. + * + * @return + * Index to flow counter on success, 0 otherwise and rte_errno is set. 
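The value handed back to the caller is a packed counter index rather than a pointer. A plausible standalone model of the MLX5_MAKE_CNT_IDX encoding used above, where the +1 keeps index 0 meaning "no counter" and a high bit (a stand-in for MLX5_CNT_SHARED_OFFSET) marks shared counters:

#include <stdint.h>
#include <assert.h>

#define PER_POOL   512                 /* stand-in for MLX5_COUNTERS_PER_POOL */
#define SHARED_BIT (UINT32_C(1) << 29) /* stand-in for MLX5_CNT_SHARED_OFFSET */

static uint32_t
make_cnt_idx(uint32_t pool, uint32_t offset)
{
	return pool * PER_POOL + offset + 1;
}

static void
split_cnt_idx(uint32_t idx, uint32_t *pool, uint32_t *offset)
{
	idx = (idx & ~SHARED_BIT) - 1;
	*pool = idx / PER_POOL;
	*offset = idx % PER_POOL;
}

int main(void)
{
	uint32_t pool, offset;

	split_cnt_idx(make_cnt_idx(3, 7) | SHARED_BIT, &pool, &offset);
	assert(pool == 3 && offset == 7);
	return 0;
}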
+ */ + +static int32_t +flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data) +{ + struct mlx5_shared_counter_conf *conf = ctx; + struct rte_eth_dev *dev = conf->dev; + struct mlx5_flow_counter *cnt; + + data->dword = flow_dv_counter_alloc(dev, 0); + data->dword |= MLX5_CNT_SHARED_OFFSET; + cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL); + cnt->shared_info.id = conf->id; + return 0; +} + +/** + * Get a shared flow counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] id + * Counter identifier. + * + * @return + * Index to flow counter on success, 0 otherwise and rte_errno is set. + */ +static uint32_t +flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_counter_conf conf = { + .dev = dev, + .id = id, + }; + union mlx5_l3t_data data = { + .dword = 0, + }; + + mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data, + flow_dv_counter_alloc_shared_cb, &conf); + return data.dword; +} + /** * Get age param from counter index. * @@ -4710,13 +5012,13 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, struct mlx5_age_info *age_info; struct mlx5_age_param *age_param; struct mlx5_priv *priv = dev->data->dev_private; + uint16_t expected = AGE_CANDIDATE; age_info = GET_PORT_AGE_INFO(priv); age_param = flow_dv_counter_idx_get_age(dev, counter); - if (rte_atomic16_cmpset((volatile uint16_t *) - &age_param->state, - AGE_CANDIDATE, AGE_FREE) - != AGE_CANDIDATE) { + if (!__atomic_compare_exchange_n(&age_param->state, &expected, + AGE_FREE, false, __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { /** * We need the lock even it is age timeout, * since counter may still in process. @@ -4724,9 +5026,10 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, rte_spinlock_lock(&age_info->aged_sl); TAILQ_REMOVE(&age_info->aged_counters, cnt, next); rte_spinlock_unlock(&age_info->aged_sl); + __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED); } - rte_atomic16_set(&age_param->state, AGE_FREE); } + /** * Release a flow counter. * @@ -4736,28 +5039,21 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, * Index to the counter handler. */ static void -flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) +flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; struct mlx5_flow_counter *cnt; - struct mlx5_flow_counter_ext *cnt_ext = NULL; + enum mlx5_counter_type cnt_type; if (!counter) return; cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); MLX5_ASSERT(pool); - if (counter < MLX5_CNT_BATCH_OFFSET) { - cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); - if (cnt_ext) { - if (--cnt_ext->ref_cnt) - return; - if (cnt_ext->shared) - mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, - cnt_ext->id); - } - } - if (IS_AGE_POOL(pool)) + if (IS_SHARED_CNT(counter) && + mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id)) + return; + if (pool->is_aged) flow_dv_counter_remove_from_age(dev, counter, cnt); cnt->pool = pool; /* @@ -4770,12 +5066,19 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) * function both operate with the different list. 
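The age-state retirement above replaces an rte_atomic16 cmpset with a GCC atomic compare-and-swap; only the loser of the race has to take the lock and unlink the counter from the aged list. The transition in miniature (enum values and names are illustrative):

#include <stdint.h>
#include <stdbool.h>

enum { AGE_FREE, AGE_CANDIDATE };

/* Returns true when the caller won the race and no aged-list cleanup
 * is needed; false means the aging thread already queued the counter
 * and the caller must lock, unlink it, then store AGE_FREE itself. */
static bool
try_retire_age_state(uint16_t *state)
{
	uint16_t expected = AGE_CANDIDATE;

	return __atomic_compare_exchange_n(state, &expected, AGE_FREE,
					   false, __ATOMIC_RELAXED,
					   __ATOMIC_RELAXED);
}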
* */ - if (!priv->counter_fallback) + if (!priv->sh->cmng.counter_fallback) { + rte_spinlock_lock(&pool->csl); TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next); - else - TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER - (priv->sh, 0, 0))->counters), + rte_spinlock_unlock(&pool->csl); + } else { + cnt->dcs_when_free = cnt->dcs_when_active; + cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE : + MLX5_COUNTER_TYPE_ORIGIN; + rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]); + TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type], cnt, next); + rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]); + } } /** @@ -4798,8 +5101,9 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) */ static int flow_dv_validate_attributes(struct rte_eth_dev *dev, + const struct mlx5_flow_tunnel *tunnel, const struct rte_flow_attr *attributes, - bool external __rte_unused, + const struct flow_grp_info *grp_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -4807,6 +5111,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, int ret = 0; #ifndef HAVE_MLX5DV_DR + RTE_SET_USED(tunnel); + RTE_SET_USED(grp_info); if (attributes->group) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -4815,9 +5121,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, #else uint32_t table = 0; - ret = mlx5_flow_group_to_table(attributes, external, - attributes->group, !!priv->fdb_def_rule, - &table, error); + ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table, + grp_info, error); if (ret) return ret; if (!table) @@ -4900,15 +5205,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, .dst_port = RTE_BE16(UINT16_MAX), } }; - const struct rte_flow_item_ipv4 nic_ipv4_mask = { - .hdr = { - .src_addr = RTE_BE32(0xffffffff), - .dst_addr = RTE_BE32(0xffffffff), - .type_of_service = 0xff, - .next_proto_id = 0xff, - .time_to_live = 0xff, - }, - }; const struct rte_flow_item_ipv6 nic_ipv6_mask = { .hdr = { .src_addr = @@ -4921,6 +5217,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, .proto = 0xff, .hop_limits = 0xff, }, + .has_frag_ext = 1, + }; + const struct rte_flow_item_ecpri nic_ecpri_mask = { + .hdr = { + .common = { + .u32 = + RTE_BE32(((const struct rte_ecpri_common_hdr) { + .type = 0xFF, + }).u32), + }, + .dummy[0] = 0xffffffff, + }, }; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; @@ -4928,10 +5236,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item_vlan *vlan_m = NULL; int16_t rw_act_num = 0; uint64_t is_root; + const struct mlx5_flow_tunnel *tunnel; + struct flow_grp_info grp_info = { + .external = !!external, + .transfer = !!attr->transfer, + .fdb_def_rule = !!priv->fdb_def_rule, + }; + const struct rte_eth_hairpin_conf *conf; if (items == NULL) return -1; - ret = flow_dv_validate_attributes(dev, attr, external, error); + if (is_flow_tunnel_match_rule(dev, attr, items, actions)) { + tunnel = flow_items_to_tunnel(items); + action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH | + MLX5_FLOW_ACTION_DECAP; + } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) { + tunnel = flow_actions_to_tunnel(actions); + action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; + } else { + tunnel = NULL; + } + grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate + (dev, tunnel, attr, items, actions); + ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error); 
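The release path above parks a freed counter on the list of the pool's current query generation, so it cannot be handed out again while an in-flight asynchronous query could still overwrite its reset values. A miniature of that two-list discipline using sys/queue.h (types simplified, locking omitted):

#include <sys/queue.h>
#include <stddef.h>

struct counter {
	TAILQ_ENTRY(counter) next;
};
TAILQ_HEAD(counter_list, counter);

struct pool {
	struct counter_list counters[2]; /* Two generations of free lists. */
	int query_gen;                   /* Generation the query thread owns. */
};

/* Freed counters join the generation still being queried... */
static void
counter_release(struct pool *p, struct counter *c)
{
	TAILQ_INSERT_TAIL(&p->counters[p->query_gen], c, next);
}

/* ...while allocation drains only the already-synced generation. */
static struct counter *
counter_take(struct pool *p)
{
	struct counter_list *l = &p->counters[!p->query_gen];
	struct counter *c = TAILQ_FIRST(l);

	if (c != NULL)
		TAILQ_REMOVE(l, c, next);
	return c;
}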
if (ret < 0) return ret; is_root = (uint64_t)ret; @@ -4939,7 +5266,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int type = items->type; + if (!mlx5_flow_os_item_supported(type)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "item not supported"); switch (type) { + case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL: + if (items[0].type != (typeof(items[0].type)) + MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "MLX5 private items " + "must be the first"); + break; case RTE_FLOW_ITEM_TYPE_VOID: break; case RTE_FLOW_ITEM_TYPE_PORT_ID: @@ -4951,7 +5291,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ITEM_TYPE_ETH: ret = mlx5_flow_validate_item_eth(items, item_flags, - error); + true, error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : @@ -4993,11 +5333,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ITEM_TYPE_IPV4: mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); - ret = mlx5_flow_validate_item_ipv4(items, item_flags, - last_item, - ether_type, - &nic_ipv4_mask, - error); + ret = flow_dv_validate_item_ipv4(items, item_flags, + last_item, ether_type, + error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : @@ -5045,6 +5383,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, next_protocol = 0xff; } break; + case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: + ret = flow_dv_validate_item_ipv6_frag_ext(items, + item_flags, + error); + if (ret < 0) + return ret; + last_item = tunnel ? + MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : + MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6_frag_ext *) + items->mask)->hdr.next_header) { + next_protocol = + ((const struct rte_flow_item_ipv6_frag_ext *) + items->spec)->hdr.next_header; + next_protocol &= + ((const struct rte_flow_item_ipv6_frag_ext *) + items->mask)->hdr.next_header; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; case RTE_FLOW_ITEM_TYPE_TCP: ret = mlx5_flow_validate_item_tcp (items, item_flags, @@ -5168,6 +5529,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; last_item = MLX5_FLOW_LAYER_GTP; break; + case RTE_FLOW_ITEM_TYPE_ECPRI: + /* Capacity will be checked in the translate stage. 
 */
+			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+							    last_item,
+							    ether_type,
+							    &nic_ecpri_mask,
+							    error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_LAYER_ECPRI;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -5177,6 +5549,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	}
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		int type = actions->type;
+
+		if (!mlx5_flow_os_action_supported(type))
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  actions,
+						  "action not supported");
 		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -5487,7 +5865,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			rw_act_num += MLX5_ACT_NUM_MDF_TTL;
 			break;
 		case RTE_FLOW_ACTION_TYPE_JUMP:
-			ret = flow_dv_validate_action_jump(actions,
+			ret = flow_dv_validate_action_jump(dev, tunnel, actions,
 							   action_flags,
 							   attr, external,
 							   error);
@@ -5550,6 +5928,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Meter action will add one more TAG action. */
 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
 			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
+			if (!attr->group)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"Shared ASO age action is not supported for group 0");
+			action_flags |= MLX5_FLOW_ACTION_AGE;
+			++actions_n;
+			break;
 		case RTE_FLOW_ACTION_TYPE_AGE:
 			ret = flow_dv_validate_action_age(action_flags,
 							  actions, dev,
@@ -5587,6 +5974,26 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
 			break;
+		case RTE_FLOW_ACTION_TYPE_SAMPLE:
+			ret = flow_dv_validate_action_sample(action_flags,
+							     actions, dev,
+							     attr, error);
+			if (ret < 0)
+				return ret;
+			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+			++actions_n;
+			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+			if (actions[0].type != (typeof(actions[0].type))
+			    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
+				return rte_flow_error_set
+						(error, EINVAL,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 NULL, "MLX5 private action "
+						 "must be the first");
+
+			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -5594,6 +6001,54 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 						  "action not supported");
 		}
 	}
+	/*
+	 * Validate actions in flow rules:
+	 * - Explicit decap action is prohibited by the tunnel offload API.
+	 * - Drop action in tunnel steer rule is prohibited by the API.
+	 * - Application cannot use MARK action because its value can mask
+	 *   tunnel default miss notification.
+	 * - JUMP in tunnel match rule is not supported by the current PMD
+	 *   implementation.
+	 * - TAG & META are reserved for future uses.
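The constraint list above is enforced immediately below as plain bitmask tests over action_flags. A compact standalone rendition of the same checks, with arbitrary stand-in flag values (the real MLX5_FLOW_ACTION_* bits differ):

#include <stdint.h>

#define ACT_DECAP    (1u << 0)
#define ACT_MARK     (1u << 1)
#define ACT_SET_TAG  (1u << 2)
#define ACT_SET_META (1u << 3)
#define ACT_DROP     (1u << 4)
#define ACT_JUMP     (1u << 5)

/* Returns 0 when a tunnel-set (steer) rule passes all three checks. */
static int
validate_tunnel_set(uint32_t action_flags, int ingress)
{
	const uint32_t bad = ACT_DECAP | ACT_MARK | ACT_SET_TAG |
			     ACT_SET_META | ACT_DROP;

	if (action_flags & bad)
		return -1; /* forbidden action present */
	if (!(action_flags & ACT_JUMP))
		return -2; /* steer rule must terminate with JUMP */
	if (!ingress)
		return -3; /* tunnel rules serve ingress traffic only */
	return 0;
}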
+ */ + if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) { + uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP | + MLX5_FLOW_ACTION_MARK | + MLX5_FLOW_ACTION_SET_TAG | + MLX5_FLOW_ACTION_SET_META | + MLX5_FLOW_ACTION_DROP; + + if (action_flags & bad_actions_mask) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Invalid RTE action in tunnel " + "set decap rule"); + if (!(action_flags & MLX5_FLOW_ACTION_JUMP)) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "tunnel set decap rule must terminate " + "with JUMP"); + if (!attr->ingress) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "tunnel flows for ingress traffic only"); + } + if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) { + uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP | + MLX5_FLOW_ACTION_MARK | + MLX5_FLOW_ACTION_SET_TAG | + MLX5_FLOW_ACTION_SET_META; + + if (action_flags & bad_actions_mask) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Invalid RTE action in tunnel " + "set match rule"); + } /* * Validate the drop action mutual exclusion with other actions. * Drop action is mutually-exclusive with any other action, except for @@ -5642,23 +6097,50 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, actions, "no fate action is found"); } - /* Continue validation for Xcap actions.*/ - if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF || - mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + /* + * Continue validation for Xcap and VLAN actions. + * If hairpin is working in explicit TX rule mode, there is no actions + * splitting and the validation of hairpin ingress flow should be the + * same as other standard flows. + */ + if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS | + MLX5_FLOW_VLAN_ACTIONS)) && + (queue_index == 0xFFFF || + mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN || + ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL && + conf->tx_explicit != 0))) { if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "encap and decap " "combination aren't supported"); - if (!attr->transfer && attr->ingress && (action_flags & - MLX5_FLOW_ACTION_ENCAP)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "encap is not supported" - " for ingress traffic"); + if (!attr->transfer && attr->ingress) { + if (action_flags & MLX5_FLOW_ACTION_ENCAP) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap is not supported" + " for ingress traffic"); + else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "push VLAN action not " + "supported for ingress"); + else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) == + MLX5_FLOW_VLAN_ACTIONS) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no support for " + "multiple VLAN actions"); + } } - /* Hairpin flow will add one more TAG action. */ + /* + * Hairpin flow will add one more TAG action in TX implicit mode. + * In TX explicit mode, there will be no hairpin flow ID. + */ if (hairpin > 0) rw_act_num += MLX5_ACT_NUM_SET_TAG; /* extra metadata enabled: one more TAG action will be add. 
*/ @@ -5706,9 +6188,11 @@ flow_dv_prepare(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); /* In case of corrupting the memory. */ - if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not free temporary device flow"); @@ -5722,11 +6206,19 @@ flow_dv_prepare(struct rte_eth_dev *dev, "not enough memory to create flow handle"); return NULL; } - /* No multi-thread supporting. */ - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; - dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); + /* + * In some old rdma-core releases, before continuing, a check of the + * length of matching parameter will be done at first. It needs to use + * the length without misc4 param. If the flow has misc4 support, then + * the length needs to be adjusted accordingly. Each param member is + * aligned with a 64B boundary naturally. + */ + dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) - + MLX5_ST_SZ_BYTES(fte_match_set_misc4); /* * The matching value needs to be cleared to 0 before using. In the * past, it will be automatically cleared when using rte_*alloc @@ -5822,9 +6314,10 @@ flow_dv_translate_item_eth(void *matcher, void *key, .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", .type = RTE_BE16(0xffff), + .has_vlan = 0, }; - void *headers_m; - void *headers_v; + void *hdrs_m; + void *hdrs_v; char *l24_v; unsigned int i; @@ -5833,58 +6326,70 @@ flow_dv_translate_item_eth(void *matcher, void *key, if (!eth_m) eth_m = &nic_mask; if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); } - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16), + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16), ð_m->dst, sizeof(eth_m->dst)); /* The value must be in the range of the mask. */ - l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16); for (i = 0; i < sizeof(eth_m->dst); ++i) l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i]; - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16), + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16), ð_m->src, sizeof(eth_m->src)); - l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16); /* The value must be in the range of the mask. */ for (i = 0; i < sizeof(eth_m->dst); ++i) l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; - if (eth_v->type) { - /* When ethertype is present set mask for tagged VLAN. 
*/ - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); - /* Set value for tagged VLAN if ethertype is 802.1Q. */ - if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) || - eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { - MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, - 1); - /* Return here to avoid setting match on ethertype. */ - return; - } - } /* * HW supports match on one Ethertype, the Ethertype following the last * VLAN tag of the packet (see PRM). * Set match on ethertype only if ETH header is not followed by VLAN. * HW is optimized for IPv4/IPv6. In such cases, avoid setting * ethertype, and use ip_version field instead. + * eCPRI over Ether layer will use type value 0xAEFE. */ - if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && - eth_m->type == 0xFFFF) { - flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); - } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && - eth_m->type == 0xFFFF) { - flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); - } else { - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, - rte_be_to_cpu_16(eth_m->type)); - l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - ethertype); - *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; + if (eth_m->type == 0xFFFF) { + /* Set cvlan_tag mask for any single\multi\un-tagged case. */ + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1); + switch (eth_v->type) { + case RTE_BE16(RTE_ETHER_TYPE_VLAN): + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1); + return; + case RTE_BE16(RTE_ETHER_TYPE_QINQ): + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); + return; + case RTE_BE16(RTE_ETHER_TYPE_IPV4): + flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4); + return; + case RTE_BE16(RTE_ETHER_TYPE_IPV6): + flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6); + return; + default: + break; + } + } + if (eth_m->has_vlan) { + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1); + if (eth_v->has_vlan) { + /* + * Here, when also has_more_vlan field in VLAN item is + * not set, only single-tagged packets will be matched. + */ + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1); + return; + } } + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype, + rte_be_to_cpu_16(eth_m->type)); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype); + *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; } /** @@ -5909,19 +6414,19 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, { const struct rte_flow_item_vlan *vlan_m = item->mask; const struct rte_flow_item_vlan *vlan_v = item->spec; - void *headers_m; - void *headers_v; + void *hdrs_m; + void *hdrs_v; uint16_t tci_m; uint16_t tci_v; if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); /* * This is workaround, masks are not supported, * and pre-validated. @@ -5934,37 +6439,54 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * When VLAN item exists in flow, mark packet as tagged, * even if TCI is not specified. 
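The rewritten Ethernet translation above dispatches on a fully-masked ethertype first: VLAN ethertypes only set the tag-present bits, IPv4/IPv6 set ip_version, and everything else falls back to a raw ethertype match. The decision reduced to a sketch (constants shown in host byte order for readability):

#include <stdint.h>

enum match_kind {
	MATCH_CVLAN_TAG,     /* 802.1Q: only the customer-tag bit. */
	MATCH_SVLAN_TAG,     /* 802.1ad/QinQ: service-tag bit. */
	MATCH_IP_VERSION_4,  /* Prefer ip_version over raw ethertype. */
	MATCH_IP_VERSION_6,
	MATCH_RAW_ETHERTYPE, /* Anything else, or a partial mask. */
};

static enum match_kind
classify_ethertype(uint16_t type, uint16_t mask)
{
	if (mask != 0xffff)
		return MATCH_RAW_ETHERTYPE;
	switch (type) {
	case 0x8100: return MATCH_CVLAN_TAG;
	case 0x88a8: return MATCH_SVLAN_TAG;
	case 0x0800: return MATCH_IP_VERSION_4;
	case 0x86dd: return MATCH_IP_VERSION_6;
	default:     return MATCH_RAW_ETHERTYPE;
	}
}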
*/ - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); + if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) { + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1); + } if (!vlan_v) return; if (!vlan_m) vlan_m = &rte_flow_item_vlan_mask; tci_m = rte_be_to_cpu_16(vlan_m->tci); tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13); /* * HW is optimized for IPv4/IPv6. In such cases, avoid setting * ethertype, and use ip_version field instead. */ - if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && - vlan_m->inner_type == 0xFFFF) { - flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); - } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && - vlan_m->inner_type == 0xFFFF) { - flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); - } else { - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, - rte_be_to_cpu_16(vlan_m->inner_type)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - rte_be_to_cpu_16(vlan_m->inner_type & - vlan_v->inner_type)); + if (vlan_m->inner_type == 0xFFFF) { + switch (vlan_v->inner_type) { + case RTE_BE16(RTE_ETHER_TYPE_VLAN): + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0); + return; + case RTE_BE16(RTE_ETHER_TYPE_IPV4): + flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4); + return; + case RTE_BE16(RTE_ETHER_TYPE_IPV6): + flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6); + return; + default: + break; + } } + if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) { + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); + /* Only one vlan_tag bit can be set. */ + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0); + return; + } + MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type)); + MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type)); } /** @@ -5976,8 +6498,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] item_flags - * Bit-fields that holds the items detected until now. * @param[in] inner * Item is inner pattern. 
* @param[in] group @@ -5986,7 +6506,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, static void flow_dv_translate_item_ipv4(void *matcher, void *key, const struct rte_flow_item *item, - const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv4 *ipv4_m = item->mask; @@ -6016,13 +6535,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); } flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); - /* - * On outer header (which must contains L2), or inner header with L2, - * set cvlan_tag mask bit to mark this packet as untagged. - * This should be done even if item->spec is empty. - */ - if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv4_v) return; if (!ipv4_m) @@ -6054,6 +6566,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, ipv4_m->hdr.time_to_live); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, + !!(ipv4_m->hdr.fragment_offset)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, + !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset)); } /** @@ -6065,8 +6581,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] item_flags - * Bit-fields that holds the items detected until now. * @param[in] inner * Item is inner pattern. * @param[in] group @@ -6075,7 +6589,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, static void flow_dv_translate_item_ipv6(void *matcher, void *key, const struct rte_flow_item *item, - const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv6 *ipv6_m = item->mask; @@ -6114,13 +6627,6 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); } flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); - /* - * On outer header (which must contains L2), or inner header with L2, - * set cvlan_tag mask bit to mark this packet as untagged. - * This should be done even if item->spec is empty. - */ - if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv6_v) return; if (!ipv6_m) @@ -6169,10 +6675,14 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, ipv6_m->hdr.hop_limits); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, + !!(ipv6_m->has_frag_ext)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, + !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext)); } /** - * Add TCP item to matcher and to the value. + * Add IPV6 fragment extension item to matcher and to the value. * * @param[in, out] matcher * Flow matcher. @@ -6184,12 +6694,63 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, * Item is inner pattern. 
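The new fragment matching above collapses the multi-bit fragment_offset field (and IPv6's has_frag_ext flag) into a single hardware "frag" bit via the !! idiom, so the match is independent of which offset bits the user actually set. A tiny standalone equivalent:

#include <stdint.h>

struct frag_match {
	unsigned int mask_bit : 1;
	unsigned int value_bit : 1;
};

static struct frag_match
frag_bit(uint16_t spec_frag_off, uint16_t mask_frag_off)
{
	struct frag_match f;

	f.mask_bit = !!mask_frag_off;                    /* care about frag? */
	f.value_bit = !!(spec_frag_off & mask_frag_off); /* fragmented or not */
	return f;
}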
*/ static void -flow_dv_translate_item_tcp(void *matcher, void *key, - const struct rte_flow_item *item, - int inner) +flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) { - const struct rte_flow_item_tcp *tcp_m = item->mask; - const struct rte_flow_item_tcp *tcp_v = item->spec; + const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask; + const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec; + const struct rte_flow_item_ipv6_frag_ext nic_mask = { + .hdr = { + .next_header = 0xff, + .frag_data = RTE_BE16(0xffff), + }, + }; + void *headers_m; + void *headers_v; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + /* IPv6 fragment extension item exists, so packet is IP fragment. */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1); + if (!ipv6_frag_ext_v) + return; + if (!ipv6_frag_ext_m) + ipv6_frag_ext_m = &nic_mask; + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, + ipv6_frag_ext_m->hdr.next_header); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + ipv6_frag_ext_v->hdr.next_header & + ipv6_frag_ext_m->hdr.next_header); +} + +/** + * Add TCP item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_tcp(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_tcp *tcp_m = item->mask; + const struct rte_flow_item_tcp *tcp_v = item->spec; void *headers_m; void *headers_v; @@ -6405,8 +6966,8 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, const struct rte_flow_item_nvgre *nvgre_v = item->spec; void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); - const char *tni_flow_id_m = (const char *)nvgre_m->tni; - const char *tni_flow_id_v = (const char *)nvgre_v->tni; + const char *tni_flow_id_m; + const char *tni_flow_id_v; char *gre_key_m; char *gre_key_v; int size; @@ -6431,6 +6992,8 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, return; if (!nvgre_m) nvgre_m = &rte_flow_item_nvgre_mask; + tni_flow_id_m = (const char *)nvgre_m->tni; + tni_flow_id_v = (const char *)nvgre_v->tni; size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h); gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h); @@ -7101,12 +7664,6 @@ flow_dv_translate_item_icmp6(void *matcher, void *key, return; if (!icmp6_m) icmp6_m = &rte_flow_item_icmp6_mask; - /* - * Force flow only to match the non-fragmented IPv6 ICMPv6 packets. - * If only the protocol is specified, no need to match the frag. 
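The NVGRE hunk above is a use-before-NULL-check fix: tni was previously dereferenced through nvgre_m at declaration time, before the default mask could be substituted. The corrected ordering in miniature (types are stand-ins):

#include <stdint.h>
#include <stddef.h>

struct nvgre_hdr { uint8_t tni[3]; uint8_t flow_id; };
static const struct nvgre_hdr nvgre_default_mask = { {0xff, 0xff, 0xff}, 0 };

static const uint8_t *
tni_mask(const struct nvgre_hdr *mask)
{
	if (mask == NULL)
		mask = &nvgre_default_mask; /* apply the default first... */
	return mask->tni;                   /* ...only then dereference */
}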
- */
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
 	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
 	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
 		 icmp6_v->type & icmp6_m->type);
@@ -7134,6 +7691,8 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
 {
 	const struct rte_flow_item_icmp *icmp_m = item->mask;
 	const struct rte_flow_item_icmp *icmp_v = item->spec;
+	uint32_t icmp_header_data_m = 0;
+	uint32_t icmp_header_data_v = 0;
 	void *headers_m;
 	void *headers_v;
 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
@@ -7154,12 +7713,6 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
 		return;
 	if (!icmp_m)
 		icmp_m = &rte_flow_item_icmp_mask;
-	/*
-	 * Force flow only to match the non-fragmented IPv4 ICMP packets.
-	 * If only the protocol is specified, no need to match the frag.
-	 */
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
 	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
 		 icmp_m->hdr.icmp_type);
 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
@@ -7168,6 +7721,17 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
 		 icmp_m->hdr.icmp_code);
 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
 		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+	icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
+	icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
+	if (icmp_header_data_m) {
+		icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
+		icmp_header_data_v |=
+			rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
+		MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
+			 icmp_header_data_m);
+		MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
+			 icmp_header_data_v & icmp_header_data_m);
+	}
 }
 
 /**
@@ -7225,6 +7789,92 @@ flow_dv_translate_item_gtp(void *matcher, void *key,
 			 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
 }
 
+/**
+ * Add eCPRI item to matcher and to the value.
+ *
+ * @param[in] dev
+ *   The device to configure through.
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
+			     void *key, const struct rte_flow_item *item)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
+	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
+	struct rte_ecpri_common_hdr common;
+	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_4);
+	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+	uint32_t *samples;
+	void *dw_m;
+	void *dw_v;
+
+	if (!ecpri_v)
+		return;
+	if (!ecpri_m)
+		ecpri_m = &rte_flow_item_ecpri_mask;
+	/*
+	 * At most four DW samples are supported in a single matching now.
+	 * Two are used now for an eCPRI matching:
+	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
+	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
+	 *    if any.
+	 */
+	if (!ecpri_m->hdr.common.u32)
+		return;
+	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+	/* Need to take the whole DW as the mask to fill the entry. */
+	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+			    prog_sample_field_value_0);
+	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+			    prog_sample_field_value_0);
+	/* Already big endian (network order) in the header.
*/ + *(uint32_t *)dw_m = ecpri_m->hdr.common.u32; + *(uint32_t *)dw_v = ecpri_v->hdr.common.u32; + /* Sample#0, used for matching type, offset 0. */ + MLX5_SET(fte_match_set_misc4, misc4_m, + prog_sample_field_id_0, samples[0]); + /* It makes no sense to set the sample ID in the mask field. */ + MLX5_SET(fte_match_set_misc4, misc4_v, + prog_sample_field_id_0, samples[0]); + /* + * Checking if message body part needs to be matched. + * Some wildcard rules only matching type field should be supported. + */ + if (ecpri_m->hdr.dummy[0]) { + common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32); + switch (common.type) { + case RTE_ECPRI_MSG_TYPE_IQ_DATA: + case RTE_ECPRI_MSG_TYPE_RTC_CTRL: + case RTE_ECPRI_MSG_TYPE_DLY_MSR: + dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m, + prog_sample_field_value_1); + dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, + prog_sample_field_value_1); + *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0]; + *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0]; + /* Sample#1, to match message body, offset 4. */ + MLX5_SET(fte_match_set_misc4, misc4_m, + prog_sample_field_id_1, samples[1]); + MLX5_SET(fte_match_set_misc4, misc4_v, + prog_sample_field_id_1, samples[1]); + break; + default: + /* Others, do not match any sample ID. */ + break; + } + } +} + static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; #define HEADER_IS_ZERO(match_criteria, headers) \ @@ -7260,9 +7910,80 @@ flow_dv_matcher_enable(uint32_t *match_criteria) match_criteria_enable |= (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT; + match_criteria_enable |= + (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) << + MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT; return match_criteria_enable; } +struct mlx5_hlist_entry * +flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_tbl_data_entry *tbl_data; + struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data; + struct rte_flow_error *error = ctx->error; + union mlx5_flow_tbl_key key = { .v64 = key64 }; + struct mlx5_flow_tbl_resource *tbl; + void *domain; + uint32_t idx = 0; + int ret; + + tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); + if (!tbl_data) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot allocate flow table data entry"); + return NULL; + } + tbl_data->idx = idx; + tbl_data->tunnel = tt_prm->tunnel; + tbl_data->group_id = tt_prm->group_id; + tbl_data->external = tt_prm->external; + tbl_data->tunnel_offload = is_tunnel_offload_active(dev); + tbl_data->is_egress = !!key.direction; + tbl = &tbl_data->tbl; + if (key.dummy) + return &tbl_data->entry; + if (key.domain) + domain = sh->fdb_domain; + else if (key.direction) + domain = sh->tx_domain; + else + domain = sh->rx_domain; + ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj); + if (ret) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create flow table object"); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + return NULL; + } + if (key.table_id) { + ret = mlx5_flow_os_create_flow_action_dest_flow_tbl + (tbl->obj, &tbl_data->jump.action); + if (ret) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot create flow jump action"); + mlx5_flow_os_destroy_flow_tbl(tbl->obj); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + return 
NULL; + } + } + MKSTR(matcher_name, "%s_%s_%u_matcher_cache", + key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress", + key.table_id); + mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh, + flow_dv_matcher_create_cb, + flow_dv_matcher_match_cb, + flow_dv_matcher_remove_cb); + return &tbl_data->entry; +} /** * Get a flow table. @@ -7275,93 +7996,107 @@ flow_dv_matcher_enable(uint32_t *match_criteria) * Direction of the table. * @param[in] transfer * E-Switch or NIC flow. + * @param[in] dummy + * Dummy entry for dv API. * @param[out] error * pointer to error structure. * * @return * Returns tables resource based on the index, NULL in case of failed. */ -static struct mlx5_flow_tbl_resource * +struct mlx5_flow_tbl_resource * flow_dv_tbl_resource_get(struct rte_eth_dev *dev, uint32_t table_id, uint8_t egress, uint8_t transfer, + bool external, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, uint8_t dummy, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_tbl_resource *tbl; union mlx5_flow_tbl_key table_key = { { .table_id = table_id, - .reserved = 0, + .dummy = dummy, .domain = !!transfer, .direction = !!egress, } }; - struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls, - table_key.v64); + struct mlx5_flow_tbl_tunnel_prm tt_prm = { + .tunnel = tunnel, + .group_id = group_id, + .external = external, + }; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + .data = &tt_prm, + }; + struct mlx5_hlist_entry *entry; struct mlx5_flow_tbl_data_entry *tbl_data; - uint32_t idx = 0; - int ret; - void *domain; - if (pos) { - tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, - entry); - tbl = &tbl_data->tbl; - rte_atomic32_inc(&tbl->refcnt); - return tbl; - } - tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); - if (!tbl_data) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot allocate flow table data entry"); - return NULL; - } - tbl_data->idx = idx; - tbl = &tbl_data->tbl; - pos = &tbl_data->entry; - if (transfer) - domain = sh->fdb_domain; - else if (egress) - domain = sh->tx_domain; - else - domain = sh->rx_domain; - tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id); - if (!tbl->obj) { + entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx); + if (!entry) { rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create flow table object"); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot get table"); return NULL; } - /* - * No multi-threads now, but still better to initialize the reference - * count before insert it into the hash list. - */ - rte_atomic32_init(&tbl->refcnt); - /* Jump action reference count is initialized here. */ - rte_atomic32_init(&tbl_data->jump.refcnt); - pos->key = table_key.v64; - ret = mlx5_hlist_insert(sh->flow_tbls, pos); - if (ret < 0) { - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot insert flow table data entry"); - mlx5_glue->dr_destroy_flow_tbl(tbl->obj); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.", + table_id, tunnel ? 
tunnel->tunnel_id : 0, group_id); + tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + return &tbl_data->tbl; +} + +void +flow_dv_tbl_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_tbl_data_entry *tbl_data = + container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + + MLX5_ASSERT(entry && sh); + if (tbl_data->jump.action) + mlx5_flow_os_destroy_flow_action(tbl_data->jump.action); + if (tbl_data->tbl.obj) + mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj); + if (tbl_data->tunnel_offload && tbl_data->external) { + struct mlx5_hlist_entry *he; + struct mlx5_hlist *tunnel_grp_hash; + struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub; + union tunnel_tbl_key tunnel_key = { + .tunnel_id = tbl_data->tunnel ? + tbl_data->tunnel->tunnel_id : 0, + .group = tbl_data->group_id + }; + union mlx5_flow_tbl_key table_key = { + .v64 = entry->key + }; + uint32_t table_id = table_key.table_id; + + tunnel_grp_hash = tbl_data->tunnel ? + tbl_data->tunnel->groups : + thub->groups; + he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL); + if (he) + mlx5_hlist_unregister(tunnel_grp_hash, he); + DRV_LOG(DEBUG, + "Table_id %u tunnel %u group %u released.", + table_id, + tbl_data->tunnel ? + tbl_data->tunnel->tunnel_id : 0, + tbl_data->group_id); } - rte_atomic32_inc(&tbl->refcnt); - return tbl; + mlx5_cache_list_destroy(&tbl_data->matchers); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); } /** * Release a flow table. * - * @param[in] dev - * Pointer to rte_eth_dev structure. + * @param[in] sh + * Pointer to device shared structure. * @param[in] tbl * Table resource to be released. * @@ -7369,28 +8104,72 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, * Returns 0 if table was released, else return 1; */ static int -flow_dv_tbl_resource_release(struct rte_eth_dev *dev, +flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, struct mlx5_flow_tbl_resource *tbl) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_tbl_data_entry *tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); if (!tbl) return 0; - if (rte_atomic32_dec_and_test(&tbl->refcnt)) { - struct mlx5_hlist_entry *pos = &tbl_data->entry; - - mlx5_glue->dr_destroy_flow_tbl(tbl->obj); - tbl->obj = NULL; - /* remove the entry from the hash list and free memory. 
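The key these hash-list callbacks register and look up is a bit-field union read back as one 64-bit scalar, so hashing and equality operate on v64 alone. A standalone model (field widths here are illustrative, not the driver's exact layout):

#include <stdint.h>
#include <assert.h>

union tbl_key {
	struct {
		uint64_t table_id : 32;
		uint64_t dummy : 1;
		uint64_t domain : 1;    /* FDB vs. NIC. */
		uint64_t direction : 1; /* egress vs. ingress. */
		uint64_t reserved : 29;
	};
	uint64_t v64; /* The value actually hashed and compared. */
};

int main(void)
{
	union tbl_key a = { .v64 = 0 };
	union tbl_key b;

	a.table_id = 5;
	a.direction = 1;
	b.v64 = a.v64; /* Copying the scalar copies every field. */
	assert(b.table_id == 5 && b.direction == 1 && b.domain == 0);
	return 0;
}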
*/ - mlx5_hlist_remove(sh->flow_tbls, pos); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], - tbl_data->idx); - return 0; + return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry); +} + +int +flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_matcher *ref = ctx->data; + struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur), + entry); + + return cur->crc != ref->crc || + cur->priority != ref->priority || + memcmp((const void *)cur->mask.buf, + (const void *)ref->mask.buf, ref->mask.size); +} + +struct mlx5_cache_entry * +flow_dv_matcher_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_matcher *ref = ctx->data; + struct mlx5_flow_dv_matcher *cache; + struct mlx5dv_flow_matcher_attr dv_attr = { + .type = IBV_FLOW_ATTR_NORMAL, + .match_mask = (void *)&ref->mask, + }; + struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl, + typeof(*tbl), tbl); + int ret; + + cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create matcher"); + return NULL; } - return 1; + *cache = *ref; + dv_attr.match_criteria_enable = + flow_dv_matcher_enable(cache->mask.buf); + dv_attr.priority = ref->priority; + if (tbl->is_egress) + dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; + ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj, + &cache->matcher_object); + if (ret) { + mlx5_free(cache); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create matcher"); + return NULL; + } + return &cache->entry; } /** @@ -7412,88 +8191,75 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev, */ static int flow_dv_matcher_register(struct rte_eth_dev *dev, - struct mlx5_flow_dv_matcher *matcher, + struct mlx5_flow_dv_matcher *ref, union mlx5_flow_tbl_key *key, struct mlx5_flow *dev_flow, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_matcher *cache_matcher; - struct mlx5dv_flow_matcher_attr dv_attr = { - .type = IBV_FLOW_ATTR_NORMAL, - .match_mask = (void *)&matcher->mask, - }; + struct mlx5_cache_entry *entry; + struct mlx5_flow_dv_matcher *cache; struct mlx5_flow_tbl_resource *tbl; struct mlx5_flow_tbl_data_entry *tbl_data; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = ref, + }; - tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, - key->domain, error); + /** + * tunnel offload API requires this registration for cases when + * tunnel match rule was inserted before tunnel set rule. + */ + tbl = flow_dv_tbl_resource_get(dev, key->table_id, + key->direction, key->domain, + dev_flow->external, tunnel, + group_id, 0, error); if (!tbl) return -rte_errno; /* No need to refill the error info */ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); - /* Lookup from cache. 
*/ - LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) { - if (matcher->crc == cache_matcher->crc && - matcher->priority == cache_matcher->priority && - !memcmp((const void *)matcher->mask.buf, - (const void *)cache_matcher->mask.buf, - cache_matcher->mask.size)) { - DRV_LOG(DEBUG, - "%s group %u priority %hd use %s " - "matcher %p: refcnt %d++", - key->domain ? "FDB" : "NIC", key->table_id, - cache_matcher->priority, - key->direction ? "tx" : "rx", - (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); - rte_atomic32_inc(&cache_matcher->refcnt); - dev_flow->handle->dvh.matcher = cache_matcher; - /* old matcher should not make the table ref++. */ - flow_dv_tbl_resource_release(dev, tbl); - return 0; - } - } - /* Register new matcher. */ - cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0); - if (!cache_matcher) { - flow_dv_tbl_resource_release(dev, tbl); + ref->tbl = tbl; + entry = mlx5_cache_register(&tbl_data->matchers, &ctx); + if (!entry) { + flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate matcher memory"); + "cannot allocate ref memory"); } - *cache_matcher = *matcher; - dv_attr.match_criteria_enable = - flow_dv_matcher_enable(cache_matcher->mask.buf); - dv_attr.priority = matcher->priority; - if (key->direction) - dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; - cache_matcher->matcher_object = - mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj); - if (!cache_matcher->matcher_object) { - rte_free(cache_matcher); -#ifdef HAVE_MLX5DV_DR - flow_dv_tbl_resource_release(dev, tbl); -#endif - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create matcher"); - } - /* Save the table information */ - cache_matcher->tbl = tbl; - rte_atomic32_init(&cache_matcher->refcnt); - /* only matcher ref++, table ref++ already done above in get API. */ - rte_atomic32_inc(&cache_matcher->refcnt); - LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); - dev_flow->handle->dvh.matcher = cache_matcher; - DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", - key->domain ? "FDB" : "NIC", key->table_id, - cache_matcher->priority, - key->direction ? "tx" : "rx", (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); + cache = container_of(entry, typeof(*cache), entry); + dev_flow->handle->dvh.matcher = cache; return 0; } +struct mlx5_hlist_entry * +flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct rte_flow_error *error = ctx; + struct mlx5_flow_dv_tag_resource *entry; + uint32_t idx = 0; + int ret; + + entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx); + if (!entry) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + return NULL; + } + entry->idx = idx; + ret = mlx5_flow_os_create_flow_action_tag(key, + &entry->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx); + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + return NULL; + } + return &entry->entry; +} + /** * Find existing tag resource or create and register a new one. 
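 *
 * Editorial aside (not part of the upstream patch): after this rework the
 * register helper is a thin wrapper over the hashed-list callbacks above,
 * so the lookup-or-create path reduces to a sketch like:
 *
 * @code
 * entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
 * if (!entry)
 *         return -rte_errno; // flow_dv_tag_create_cb filled the error
 * @endcode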
* @@ -7517,52 +8283,32 @@ flow_dv_tag_resource_register struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_tag_resource *cache_resource; struct mlx5_hlist_entry *entry; - /* Lookup a matching resource from cache. */ - entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24); + entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error); if (entry) { cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); - rte_atomic32_inc(&cache_resource->refcnt); dev_flow->handle->dvh.rix_tag = cache_resource->idx; dev_flow->dv.tag_resource = cache_resource; - DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); return 0; } - /* Register new resource. */ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], - &dev_flow->handle->dvh.rix_tag); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - cache_resource->entry.key = (uint64_t)tag_be24; - cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24); - if (!cache_resource->action) { - rte_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { - mlx5_glue->destroy_flow_action(cache_resource->action); - rte_free(cache_resource); - return rte_flow_error_set(error, EEXIST, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot insert tag"); - } - dev_flow->dv.tag_resource = cache_resource; - DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - return 0; + return -rte_errno; +} + +void +flow_dv_tag_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_dv_tag_resource *tag = + container_of(entry, struct mlx5_flow_dv_tag_resource, entry); + + MLX5_ASSERT(tag && sh && tag->action); + claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); + DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx); } /** @@ -7581,24 +8327,14 @@ flow_dv_tag_release(struct rte_eth_dev *dev, uint32_t tag_idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_tag_resource *tag; tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); if (!tag) return 0; DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", - dev->data->port_id, (void *)tag, - rte_atomic32_read(&tag->refcnt)); - if (rte_atomic32_dec_and_test(&tag->refcnt)) { - claim_zero(mlx5_glue->destroy_flow_action(tag->action)); - mlx5_hlist_remove(sh->tag_table, &tag->entry); - DRV_LOG(DEBUG, "port %u tag %p: removed", - dev->data->port_id, (void *)tag); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); - return 0; - } - return 1; + dev->data->port_id, (void *)tag, tag->entry.ref_cnt); + return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry); } /** @@ -7673,31 +8409,22 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev, uint32_t counter; struct mlx5_age_param *age_param; - counter = flow_dv_counter_alloc(dev, - count ? count->shared : 0, - count ? 
count->id : 0, - dev_flow->dv.group, !!age); + if (count && count->shared) + counter = flow_dv_counter_get_shared(dev, count->id); + else + counter = flow_dv_counter_alloc(dev, !!age); if (!counter || age == NULL) return counter; age_param = flow_dv_counter_idx_get_age(dev, counter); - /* - * The counter age accuracy may have a bit delay. Have 3/4 - * second bias on the timeount in order to let it age in time. - */ age_param->context = age->context ? age->context : (void *)(uintptr_t)(dev_flow->flow_idx); - /* - * The counter age accuracy may have a bit delay. Have 3/4 - * second bias on the timeount in order to let it age in time. - */ - age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY; - /* Set expire time in unit of 0.1 sec. */ + age_param->timeout = age->timeout; age_param->port_id = dev->data->port_id; - age_param->expire = age_param->timeout + - rte_rdtsc() / (rte_get_tsc_hz() / 10); - rte_atomic16_set(&age_param->state, AGE_CANDIDATE); + __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED); + __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED); return counter; } + /** * Add Tx queue matcher * @@ -7815,1299 +8542,3175 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow, } /** - * Fill the flow with DV spec, lock free - * (mutex should be acquired by caller). + * Prepare an Rx Hash queue. * - * @param[in] dev - * Pointer to rte_eth_dev structure. - * @param[in, out] dev_flow - * Pointer to the sub flow. - * @param[in] attr - * Pointer to the flow attributes. - * @param[in] items - * Pointer to the list of items. - * @param[in] actions - * Pointer to the list of actions. - * @param[out] error - * Pointer to the error structure. + * @param dev + * Pointer to Ethernet device. + * @param[in] dev_flow + * Pointer to the mlx5_flow. + * @param[in] rss_desc + * Pointer to the mlx5_flow_rss_desc. + * @param[out] hrxq_idx + * Hash Rx queue index. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. */ -static int -__flow_dv_translate(struct rte_eth_dev *dev, - struct mlx5_flow *dev_flow, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +static struct mlx5_hrxq * +flow_dv_hrxq_prepare(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + struct mlx5_flow_rss_desc *rss_desc, + uint32_t *hrxq_idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *dev_conf = &priv->config; - struct rte_flow *flow = dev_flow->flow; - struct mlx5_flow_handle *handle = dev_flow->handle; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc) - [!!priv->flow_nested_idx]; - uint64_t item_flags = 0; - uint64_t last_item = 0; - uint64_t action_flags = 0; - uint64_t priority = attr->priority; - struct mlx5_flow_dv_matcher matcher = { - .mask = { - .size = sizeof(matcher.mask.buf), - }, + struct mlx5_flow_handle *dh = dev_flow->handle; + struct mlx5_hrxq *hrxq; + + MLX5_ASSERT(rss_desc->queue_num); + rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc->hash_fields = dev_flow->hash_fields; + rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL); + rss_desc->shared_rss = 0; + *hrxq_idx = mlx5_hrxq_get(dev, rss_desc); + if (!*hrxq_idx) + return NULL; + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + *hrxq_idx); + return hrxq; +} + +/** + * Release sample sub action resource. 
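+ *
+ * Editorial note: each sub-resource index is zeroed immediately after
+ * its release call below, so the helper is idempotent and may safely
+ * be invoked twice on the same act_res.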
+ * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in] act_res + * Pointer to sample sub action resource. + */ +static void +flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev, + struct mlx5_flow_sub_actions_idx *act_res) +{ + if (act_res->rix_hrxq) { + mlx5_hrxq_release(dev, act_res->rix_hrxq); + act_res->rix_hrxq = 0; + } + if (act_res->rix_encap_decap) { + flow_dv_encap_decap_resource_release(dev, + act_res->rix_encap_decap); + act_res->rix_encap_decap = 0; + } + if (act_res->rix_port_id_action) { + flow_dv_port_id_action_resource_release(dev, + act_res->rix_port_id_action); + act_res->rix_port_id_action = 0; + } + if (act_res->rix_tag) { + flow_dv_tag_release(dev, act_res->rix_tag); + act_res->rix_tag = 0; + } + if (act_res->cnt) { + flow_dv_counter_free(dev, act_res->cnt); + act_res->cnt = 0; + } +} + +int +flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_sample_resource *resource = ctx->data; + struct mlx5_flow_dv_sample_resource *cache_resource = + container_of(entry, typeof(*cache_resource), entry); + + if (resource->ratio == cache_resource->ratio && + resource->ft_type == cache_resource->ft_type && + resource->ft_id == cache_resource->ft_id && + resource->set_action == cache_resource->set_action && + !memcmp((void *)&resource->sample_act, + (void *)&cache_resource->sample_act, + sizeof(struct mlx5_flow_sub_actions_list))) { + /* + * Existing sample action should release the prepared + * sub-actions reference counter. + */ + flow_dv_sample_sub_actions_release(dev, + &resource->sample_idx); + return 0; + } + return 1; +} + +struct mlx5_cache_entry * +flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_sample_resource *resource = ctx->data; + void **sample_dv_actions = resource->sub_actions; + struct mlx5_flow_dv_sample_resource *cache_resource; + struct mlx5dv_dr_flow_sampler_attr sampler_attr; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_flow_tbl_resource *tbl; + uint32_t idx = 0; + const uint32_t next_ft_step = 1; + uint32_t next_ft_id = resource->ft_id + next_ft_step; + uint8_t is_egress = 0; + uint8_t is_transfer = 0; + struct rte_flow_error *error = ctx->error; + + /* Register new sample resource. 
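+ * The entry is carved from the MLX5_IPOOL_SAMPLE indexed pool, so the
+ * cached resource can later be recovered from the 32-bit index kept in
+ * the flow handle (dvh.rix_sample).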
*/ + cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx); + if (!cache_resource) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot allocate resource memory"); + return NULL; + } + *cache_resource = *resource; + /* Create normal path table level */ + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + is_transfer = 1; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) + is_egress = 1; + tbl = flow_dv_tbl_resource_get(dev, next_ft_id, + is_egress, is_transfer, + true, NULL, 0, 0, error); + if (!tbl) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "fail to create normal path table " + "for sample"); + goto error; + } + cache_resource->normal_path_tbl = tbl; + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { + cache_resource->default_miss = + mlx5_glue->dr_create_flow_action_default_miss(); + if (!cache_resource->default_miss) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot create default miss " + "action"); + goto error; + } + sample_dv_actions[resource->sample_act.actions_num++] = + cache_resource->default_miss; + } + /* Create a DR sample action */ + sampler_attr.sample_ratio = cache_resource->ratio; + sampler_attr.default_next_table = tbl->obj; + sampler_attr.num_sample_actions = resource->sample_act.actions_num; + sampler_attr.sample_actions = (struct mlx5dv_dr_action **) + &sample_dv_actions[0]; + sampler_attr.action = cache_resource->set_action; + cache_resource->verbs_action = + mlx5_glue->dr_create_flow_action_sampler(&sampler_attr); + if (!cache_resource->verbs_action) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create sample action"); + goto error; + } + cache_resource->idx = idx; + return &cache_resource->entry; +error: + if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB && + cache_resource->default_miss) + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->default_miss)); + else + flow_dv_sample_sub_actions_release(dev, + &cache_resource->sample_idx); + if (cache_resource->normal_path_tbl) + flow_dv_tbl_resource_release(MLX5_SH(dev), + cache_resource->normal_path_tbl); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx); + return NULL; + +} + +/** + * Find existing sample resource or create and register a new one. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in] resource + * Pointer to sample resource. + * @parm[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. 
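+ *
+ * A minimal, hedged call sketch (field values are illustrative only):
+ *
+ * @code
+ * struct mlx5_flow_dv_sample_resource res = { 0 };
+ *
+ * res.ratio = 2;                   // sample one packet in two
+ * res.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ * res.ft_id = dev_flow->dv.group;  // current flow table
+ * if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
+ *         return -rte_errno;       // rte_errno set by the create_cb
+ * @endcode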
+ */ +static int +flow_dv_sample_resource_register(struct rte_eth_dev *dev, + struct mlx5_flow_dv_sample_resource *resource, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_flow_dv_sample_resource *cache_resource; + struct mlx5_cache_entry *entry; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + .data = resource, }; - int actions_n = 0; - bool actions_end = false; - union { - struct mlx5_flow_dv_modify_hdr_resource res; - uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) + - sizeof(struct mlx5_modification_cmd) * - (MLX5_MAX_MODIFY_NUM + 1)]; - } mhdr_dummy; - struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; - const struct rte_flow_action_count *count = NULL; - const struct rte_flow_action_age *age = NULL; - union flow_dv_attr flow_attr = { .attr = 0 }; - uint32_t tag_be; - union mlx5_flow_tbl_key tbl_key; - uint32_t modify_action_position = UINT32_MAX; - void *match_mask = matcher.mask.buf; - void *match_value = dev_flow->dv.value.buf; - uint8_t next_protocol = 0xff; - struct rte_vlan_hdr vlan = { 0 }; - uint32_t table; - int ret = 0; - mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : - MLX5DV_FLOW_TABLE_TYPE_NIC_RX; - ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group, - !!priv->fdb_def_rule, &table, error); - if (ret) - return ret; - dev_flow->dv.group = table; - if (attr->transfer) - mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; - if (priority == MLX5_FLOW_PRIO_RSVD) - priority = dev_conf->flow_prio - 1; - /* number of actions must be set to 0 in case of dirty stack. */ - mhdr_res->actions_num = 0; - for (; !actions_end ; actions++) { - const struct rte_flow_action_queue *queue; - const struct rte_flow_action_rss *rss; - const struct rte_flow_action *action = actions; - const uint8_t *rss_key; - const struct rte_flow_action_jump *jump_data; - const struct rte_flow_action_meter *mtr; - struct mlx5_flow_tbl_resource *tbl; - uint32_t port_id = 0; - struct mlx5_flow_dv_port_id_action_resource port_id_resource; - int action_type = actions->type; - const struct rte_flow_action *found_action = NULL; - struct mlx5_flow_meter *fm = NULL; + entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx); + if (!entry) + return -rte_errno; + cache_resource = container_of(entry, typeof(*cache_resource), entry); + dev_flow->handle->dvh.rix_sample = cache_resource->idx; + dev_flow->dv.sample_res = cache_resource; + return 0; +} - switch (action_type) { - case RTE_FLOW_ACTION_TYPE_VOID: +int +flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_dest_array_resource *resource = ctx->data; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_dest_array_resource *cache_resource = + container_of(entry, typeof(*cache_resource), entry); + uint32_t idx = 0; + + if (resource->num_of_dest == cache_resource->num_of_dest && + resource->ft_type == cache_resource->ft_type && + !memcmp((void *)cache_resource->sample_act, + (void *)resource->sample_act, + (resource->num_of_dest * + sizeof(struct mlx5_flow_sub_actions_list)))) { + /* + * Existing sample action should release the prepared + * sub-actions reference counter. 
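+ *
+ * (mlx5 cache-list convention: returning 0 reports a match, any
+ * non-zero value means "no match, keep searching".)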
+ */ + for (idx = 0; idx < resource->num_of_dest; idx++) + flow_dv_sample_sub_actions_release(dev, + &resource->sample_idx[idx]); + return 0; + } + return 1; +} + +struct mlx5_cache_entry * +flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_dest_array_resource *cache_resource; + struct mlx5_flow_dv_dest_array_resource *resource = ctx->data; + struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 }; + struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM]; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_flow_sub_actions_list *sample_act; + struct mlx5dv_dr_domain *domain; + uint32_t idx = 0, res_idx = 0; + struct rte_flow_error *error = ctx->error; + + /* Register new destination array resource. */ + cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY], + &res_idx); + if (!cache_resource) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot allocate resource memory"); + return NULL; + } + *cache_resource = *resource; + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + domain = sh->fdb_domain; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) + domain = sh->rx_domain; + else + domain = sh->tx_domain; + for (idx = 0; idx < resource->num_of_dest; idx++) { + dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *) + mlx5_malloc(MLX5_MEM_ZERO, + sizeof(struct mlx5dv_dr_action_dest_attr), + 0, SOCKET_ID_ANY); + if (!dest_attr[idx]) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot allocate resource memory"); + goto error; + } + dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST; + sample_act = &resource->sample_act[idx]; + if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) { + dest_attr[idx]->dest = sample_act->dr_queue_action; + } else if (sample_act->action_flags == + (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) { + dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT; + dest_attr[idx]->dest_reformat = &dest_reformat[idx]; + dest_attr[idx]->dest_reformat->reformat = + sample_act->dr_encap_action; + dest_attr[idx]->dest_reformat->dest = + sample_act->dr_port_id_action; + } else if (sample_act->action_flags == + MLX5_FLOW_ACTION_PORT_ID) { + dest_attr[idx]->dest = sample_act->dr_port_id_action; + } + } + /* create a dest array actioin */ + cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array + (domain, + cache_resource->num_of_dest, + dest_attr); + if (!cache_resource->action) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot create destination array action"); + goto error; + } + cache_resource->idx = res_idx; + for (idx = 0; idx < resource->num_of_dest; idx++) + mlx5_free(dest_attr[idx]); + return &cache_resource->entry; +error: + for (idx = 0; idx < resource->num_of_dest; idx++) { + struct mlx5_flow_sub_actions_idx *act_res = + &cache_resource->sample_idx[idx]; + if (act_res->rix_hrxq && + !mlx5_hrxq_release(dev, + act_res->rix_hrxq)) + act_res->rix_hrxq = 0; + if (act_res->rix_encap_decap && + !flow_dv_encap_decap_resource_release(dev, + act_res->rix_encap_decap)) + act_res->rix_encap_decap = 0; + if (act_res->rix_port_id_action && + !flow_dv_port_id_action_resource_release(dev, + act_res->rix_port_id_action)) + act_res->rix_port_id_action = 
0; + if (dest_attr[idx]) + mlx5_free(dest_attr[idx]); + } + + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx); + return NULL; +} + +/** + * Find existing destination array resource or create and register a new one. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in] resource + * Pointer to destination array resource. + * @parm[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. + */ +static int +flow_dv_dest_array_resource_register(struct rte_eth_dev *dev, + struct mlx5_flow_dv_dest_array_resource *resource, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_flow_dv_dest_array_resource *cache_resource; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_cache_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + .data = resource, + }; + + entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx); + if (!entry) + return -rte_errno; + cache_resource = container_of(entry, typeof(*cache_resource), entry); + dev_flow->handle->dvh.rix_dest_array = cache_resource->idx; + dev_flow->dv.dest_array_res = cache_resource; + return 0; +} + +/** + * Convert Sample action to DV specification. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] action + * Pointer to action structure. + * @param[in, out] dev_flow + * Pointer to the mlx5_flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in, out] num_of_dest + * Pointer to the num of destination. + * @param[in, out] sample_actions + * Pointer to sample actions list. + * @param[in, out] res + * Pointer to sample resource. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
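+ *
+ * @note Callers are expected to pass a sample_actions array sized for
+ * MLX5_DV_MAX_NUMBER_OF_ACTIONS entries (as flow_dv_translate() below
+ * does); every translated sub-action is appended there and counted in
+ * res->sample_act.actions_num.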
+ */ +static int +flow_dv_translate_action_sample(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + uint32_t *num_of_dest, + void **sample_actions, + struct mlx5_flow_dv_sample_resource *res, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_sample *sample_action; + const struct rte_flow_action *sub_actions; + const struct rte_flow_action_queue *queue; + struct mlx5_flow_sub_actions_list *sample_act; + struct mlx5_flow_sub_actions_idx *sample_idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; + uint64_t action_flags = 0; + + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc; + sample_act = &res->sample_act; + sample_idx = &res->sample_idx; + sample_action = (const struct rte_flow_action_sample *)action->conf; + res->ratio = sample_action->ratio; + sub_actions = sample_action->actions; + for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) { + int type = sub_actions->type; + uint32_t pre_rix = 0; + void *pre_r; + switch (type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + { + struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx; + + queue = sub_actions->conf; + rss_desc->queue_num = 1; + rss_desc->queue[0] = queue->index; + hrxq = flow_dv_hrxq_prepare(dev, dev_flow, + rss_desc, &hrxq_idx); + if (!hrxq) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create fate queue"); + sample_act->dr_queue_action = hrxq->action; + sample_idx->rix_hrxq = hrxq_idx; + sample_actions[sample_act->actions_num++] = + hrxq->action; + (*num_of_dest)++; + action_flags |= MLX5_FLOW_ACTION_QUEUE; + if (action_flags & MLX5_FLOW_ACTION_MARK) + dev_flow->handle->rix_hrxq = hrxq_idx; + dev_flow->handle->fate_action = + MLX5_FLOW_FATE_QUEUE; + break; + } + case RTE_FLOW_ACTION_TYPE_MARK: + { + uint32_t tag_be = mlx5_flow_mark_set + (((const struct rte_flow_action_mark *) + (sub_actions->conf))->id); + + dev_flow->handle->mark = 1; + pre_rix = dev_flow->handle->dvh.rix_tag; + /* Save the mark resource before sample */ + pre_r = dev_flow->dv.tag_resource; + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; + MLX5_ASSERT(dev_flow->dv.tag_resource); + sample_act->dr_tag_action = + dev_flow->dv.tag_resource->action; + sample_idx->rix_tag = + dev_flow->handle->dvh.rix_tag; + sample_actions[sample_act->actions_num++] = + sample_act->dr_tag_action; + /* Recover the mark resource after sample */ + dev_flow->dv.tag_resource = pre_r; + dev_flow->handle->dvh.rix_tag = pre_rix; + action_flags |= MLX5_FLOW_ACTION_MARK; + break; + } + case RTE_FLOW_ACTION_TYPE_COUNT: + { + uint32_t counter; + + counter = flow_dv_translate_create_counter(dev, + dev_flow, sub_actions->conf, 0); + if (!counter) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create counter" + " object."); + sample_idx->cnt = counter; + sample_act->dr_cnt_action = + (flow_dv_counter_get_by_idx(dev, + counter, NULL))->action; + sample_actions[sample_act->actions_num++] = + sample_act->dr_cnt_action; + action_flags |= MLX5_FLOW_ACTION_COUNT; break; + } case RTE_FLOW_ACTION_TYPE_PORT_ID: - if (flow_dv_translate_action_port_id(dev, action, + { + struct mlx5_flow_dv_port_id_action_resource + port_id_resource; + uint32_t port_id = 0; + + memset(&port_id_resource, 0, sizeof(port_id_resource)); + /* Save the port id resource before sample */ 
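+ /*
+ * Editorial note: the register helper below writes its result into
+ * the flow-level slots of dev_flow, so they are stashed in
+ * pre_rix/pre_r first and restored once the sample sub-action has
+ * taken its own reference.
+ */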
+ pre_rix = dev_flow->handle->rix_port_id_action; + pre_r = dev_flow->dv.port_id_action; + if (flow_dv_translate_action_port_id(dev, sub_actions, &port_id, error)) return -rte_errno; port_id_resource.port_id = port_id; - MLX5_ASSERT(!handle->rix_port_id_action); if (flow_dv_port_id_action_resource_register (dev, &port_id_resource, dev_flow, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.port_id_action->action; + sample_act->dr_port_id_action = + dev_flow->dv.port_id_action->action; + sample_idx->rix_port_id_action = + dev_flow->handle->rix_port_id_action; + sample_actions[sample_act->actions_num++] = + sample_act->dr_port_id_action; + /* Recover the port id resource after sample */ + dev_flow->dv.port_id_action = pre_r; + dev_flow->handle->rix_port_id_action = pre_rix; + (*num_of_dest)++; action_flags |= MLX5_FLOW_ACTION_PORT_ID; - dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID; break; - case RTE_FLOW_ACTION_TYPE_FLAG: - action_flags |= MLX5_FLOW_ACTION_FLAG; - dev_flow->handle->mark = 1; - if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { - struct rte_flow_action_mark mark = { - .id = MLX5_FLOW_MARK_DEFAULT, - }; - - if (flow_dv_convert_action_mark(dev, &mark, - mhdr_res, - error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_MARK_EXT; - break; - } - tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); - /* - * Only one FLAG or MARK is supported per device flow - * right now. So the pointer to the tag resource must be - * zero before the register process. - */ - MLX5_ASSERT(!handle->dvh.rix_tag); - if (flow_dv_tag_resource_register(dev, tag_be, - dev_flow, error)) - return -rte_errno; - MLX5_ASSERT(dev_flow->dv.tag_resource); - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; - break; - case RTE_FLOW_ACTION_TYPE_MARK: - action_flags |= MLX5_FLOW_ACTION_MARK; - dev_flow->handle->mark = 1; - if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { - const struct rte_flow_action_mark *mark = - (const struct rte_flow_action_mark *) - actions->conf; - - if (flow_dv_convert_action_mark(dev, mark, - mhdr_res, - error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_MARK_EXT; - break; - } - /* Fall-through */ - case MLX5_RTE_FLOW_ACTION_TYPE_MARK: - /* Legacy (non-extensive) MARK action. 
*/ - tag_be = mlx5_flow_mark_set - (((const struct rte_flow_action_mark *) - (actions->conf))->id); - MLX5_ASSERT(!handle->dvh.rix_tag); - if (flow_dv_tag_resource_register(dev, tag_be, - dev_flow, error)) - return -rte_errno; - MLX5_ASSERT(dev_flow->dv.tag_resource); - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; - break; - case RTE_FLOW_ACTION_TYPE_SET_META: - if (flow_dv_convert_action_set_meta - (dev, mhdr_res, attr, - (const struct rte_flow_action_set_meta *) - actions->conf, error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_SET_META; - break; - case RTE_FLOW_ACTION_TYPE_SET_TAG: - if (flow_dv_convert_action_set_tag - (dev, mhdr_res, - (const struct rte_flow_action_set_tag *) - actions->conf, error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_SET_TAG; - break; - case RTE_FLOW_ACTION_TYPE_DROP: - action_flags |= MLX5_FLOW_ACTION_DROP; - dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; - break; - case RTE_FLOW_ACTION_TYPE_QUEUE: - queue = actions->conf; - rss_desc->queue_num = 1; - rss_desc->queue[0] = queue->index; - action_flags |= MLX5_FLOW_ACTION_QUEUE; - dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; - break; - case RTE_FLOW_ACTION_TYPE_RSS: - rss = actions->conf; - memcpy(rss_desc->queue, rss->queue, - rss->queue_num * sizeof(uint16_t)); - rss_desc->queue_num = rss->queue_num; - /* NULL RSS key indicates default RSS key. */ - rss_key = !rss->key ? rss_hash_default_key : rss->key; - memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); - /* - * rss->level and rss.types should be set in advance - * when expanding items for RSS. - */ - action_flags |= MLX5_FLOW_ACTION_RSS; - dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; - break; - case RTE_FLOW_ACTION_TYPE_AGE: - case RTE_FLOW_ACTION_TYPE_COUNT: - if (!dev_conf->devx) { - return rte_flow_error_set - (error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "count action not supported"); - } - /* Save information first, will apply later. 
*/ - if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) - count = action->conf; - else - age = action->conf; - action_flags |= MLX5_FLOW_ACTION_COUNT; - break; - case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: - dev_flow->dv.actions[actions_n++] = - priv->sh->pop_vlan_action; - action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; - break; - case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: - if (!(action_flags & - MLX5_FLOW_ACTION_OF_SET_VLAN_VID)) - flow_dev_get_vlan_info_from_items(items, &vlan); - vlan.eth_proto = rte_be_to_cpu_16 - ((((const struct rte_flow_action_of_push_vlan *) - actions->conf)->ethertype)); - found_action = mlx5_flow_find_action - (actions + 1, - RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID); - if (found_action) - mlx5_update_vlan_vid_pcp(found_action, &vlan); - found_action = mlx5_flow_find_action - (actions + 1, - RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP); - if (found_action) - mlx5_update_vlan_vid_pcp(found_action, &vlan); - if (flow_dv_create_action_push_vlan - (dev, attr, &vlan, dev_flow, error)) - return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.push_vlan_res->action; - action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; - break; - case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: - /* of_vlan_push action handled this action */ - MLX5_ASSERT(action_flags & - MLX5_FLOW_ACTION_OF_PUSH_VLAN); - break; - case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: - if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) - break; - flow_dev_get_vlan_info_from_items(items, &vlan); - mlx5_update_vlan_vid_pcp(actions, &vlan); - /* If no VLAN push - this is a modify header action */ - if (flow_dv_convert_action_modify_vlan_vid - (mhdr_res, actions, error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; - break; - case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: - case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: - if (flow_dv_create_action_l2_encap(dev, actions, + } + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + /* Save the encap resource before sample */ + pre_rix = dev_flow->handle->dvh.rix_encap_decap; + pre_r = dev_flow->dv.encap_decap; + if (flow_dv_create_action_l2_encap(dev, sub_actions, dev_flow, attr->transfer, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->action; - action_flags |= MLX5_FLOW_ACTION_ENCAP; - break; - case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: - case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: - if (flow_dv_create_action_l2_decap(dev, dev_flow, - attr->transfer, - error)) - return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->action; - action_flags |= MLX5_FLOW_ACTION_DECAP; - break; - case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: - /* Handle encap with preceding decap. */ - if (action_flags & MLX5_FLOW_ACTION_DECAP) { - if (flow_dv_create_action_raw_encap - (dev, actions, dev_flow, attr, error)) - return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->action; - } else { - /* Handle encap without preceding decap. 
*/ - if (flow_dv_create_action_l2_encap - (dev, actions, dev_flow, attr->transfer, - error)) - return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->action; - } + sample_act->dr_encap_action = + dev_flow->dv.encap_decap->action; + sample_idx->rix_encap_decap = + dev_flow->handle->dvh.rix_encap_decap; + sample_actions[sample_act->actions_num++] = + sample_act->dr_encap_action; + /* Recover the encap resource after sample */ + dev_flow->dv.encap_decap = pre_r; + dev_flow->handle->dvh.rix_encap_decap = pre_rix; action_flags |= MLX5_FLOW_ACTION_ENCAP; break; - case RTE_FLOW_ACTION_TYPE_RAW_DECAP: - while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID) - ; - if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { - if (flow_dv_create_action_l2_decap - (dev, dev_flow, attr->transfer, error)) - return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->action; - } - /* If decap is followed by encap, handle it at encap. */ - action_flags |= MLX5_FLOW_ACTION_DECAP; - break; - case RTE_FLOW_ACTION_TYPE_JUMP: - jump_data = action->conf; - ret = mlx5_flow_group_to_table(attr, dev_flow->external, - jump_data->group, - !!priv->fdb_def_rule, - &table, error); - if (ret) - return ret; - tbl = flow_dv_tbl_resource_get(dev, table, - attr->egress, - attr->transfer, error); - if (!tbl) - return rte_flow_error_set - (error, errno, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "cannot create jump action."); - if (flow_dv_jump_tbl_resource_register - (dev, tbl, dev_flow, error)) { - flow_dv_tbl_resource_release(dev, tbl); + default: + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Not support for sampler action"); + } + } + sample_act->action_flags = action_flags; + res->ft_id = dev_flow->dv.group; + if (attr->transfer) { + union { + uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)]; + uint64_t set_action; + } action_ctx = { .set_action = 0 }; + + res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; + MLX5_SET(set_action_in, action_ctx.action_in, action_type, + MLX5_MODIFICATION_TYPE_SET); + MLX5_SET(set_action_in, action_ctx.action_in, field, + MLX5_MODI_META_REG_C_0); + MLX5_SET(set_action_in, action_ctx.action_in, data, + priv->vport_meta_tag); + res->set_action = action_ctx.set_action; + } else if (attr->ingress) { + res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; + } else { + res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX; + } + return 0; +} + +/** + * Convert Sample action to DV specification. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in, out] dev_flow + * Pointer to the mlx5_flow. + * @param[in] num_of_dest + * The num of destination. + * @param[in, out] res + * Pointer to sample resource. + * @param[in, out] mdest_res + * Pointer to destination array resource. + * @param[in] sample_actions + * Pointer to sample path actions list. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
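+ *
+ * @note With num_of_dest > 1 the normal path is folded into the last
+ * slot of the destination array and a mirroring (dest array) action
+ * is registered; with a single destination only the plain sampler
+ * resource is registered.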
+ */ +static int +flow_dv_create_action_sample(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + uint32_t num_of_dest, + struct mlx5_flow_dv_sample_resource *res, + struct mlx5_flow_dv_dest_array_resource *mdest_res, + void **sample_actions, + uint64_t action_flags, + struct rte_flow_error *error) +{ + /* update normal path action resource into last index of array */ + uint32_t dest_index = MLX5_MAX_DEST_NUM - 1; + struct mlx5_flow_sub_actions_list *sample_act = + &mdest_res->sample_act[dest_index]; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; + uint32_t normal_idx = 0; + struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx; + + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc; + if (num_of_dest > 1) { + if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) { + /* Handle QP action for mirroring */ + hrxq = flow_dv_hrxq_prepare(dev, dev_flow, + rss_desc, &hrxq_idx); + if (!hrxq) return rte_flow_error_set - (error, errno, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "cannot create jump action."); - } - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.jump->action; - action_flags |= MLX5_FLOW_ACTION_JUMP; - dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP; - break; - case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: - case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: - if (flow_dv_convert_action_modify_mac - (mhdr_res, actions, error)) - return -rte_errno; - action_flags |= actions->type == - RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? - MLX5_FLOW_ACTION_SET_MAC_SRC : - MLX5_FLOW_ACTION_SET_MAC_DST; - break; - case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: - case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: - if (flow_dv_convert_action_modify_ipv4 - (mhdr_res, actions, error)) - return -rte_errno; - action_flags |= actions->type == - RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? - MLX5_FLOW_ACTION_SET_IPV4_SRC : - MLX5_FLOW_ACTION_SET_IPV4_DST; - break; - case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: - case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: - if (flow_dv_convert_action_modify_ipv6 - (mhdr_res, actions, error)) - return -rte_errno; - action_flags |= actions->type == - RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? - MLX5_FLOW_ACTION_SET_IPV6_SRC : - MLX5_FLOW_ACTION_SET_IPV6_DST; - break; - case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: - case RTE_FLOW_ACTION_TYPE_SET_TP_DST: - if (flow_dv_convert_action_modify_tp - (mhdr_res, actions, items, - &flow_attr, dev_flow, !!(action_flags & - MLX5_FLOW_ACTION_DECAP), error)) - return -rte_errno; - action_flags |= actions->type == - RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? - MLX5_FLOW_ACTION_SET_TP_SRC : - MLX5_FLOW_ACTION_SET_TP_DST; - break; - case RTE_FLOW_ACTION_TYPE_DEC_TTL: - if (flow_dv_convert_action_modify_dec_ttl - (mhdr_res, items, &flow_attr, dev_flow, - !!(action_flags & - MLX5_FLOW_ACTION_DECAP), error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_DEC_TTL; - break; - case RTE_FLOW_ACTION_TYPE_SET_TTL: - if (flow_dv_convert_action_modify_ttl - (mhdr_res, actions, items, &flow_attr, - dev_flow, !!(action_flags & - MLX5_FLOW_ACTION_DECAP), error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_SET_TTL; - break; - case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: - case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: - if (flow_dv_convert_action_modify_tcp_seq - (mhdr_res, actions, error)) - return -rte_errno; - action_flags |= actions->type == - RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 
- MLX5_FLOW_ACTION_INC_TCP_SEQ : - MLX5_FLOW_ACTION_DEC_TCP_SEQ; - break; + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create rx queue"); + normal_idx++; + mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx; + sample_act->dr_queue_action = hrxq->action; + if (action_flags & MLX5_FLOW_ACTION_MARK) + dev_flow->handle->rix_hrxq = hrxq_idx; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + } + if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) { + normal_idx++; + mdest_res->sample_idx[dest_index].rix_encap_decap = + dev_flow->handle->dvh.rix_encap_decap; + sample_act->dr_encap_action = + dev_flow->dv.encap_decap->action; + } + if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) { + normal_idx++; + mdest_res->sample_idx[dest_index].rix_port_id_action = + dev_flow->handle->rix_port_id_action; + sample_act->dr_port_id_action = + dev_flow->dv.port_id_action->action; + } + sample_act->actions_num = normal_idx; + /* update sample action resource into first index of array */ + mdest_res->ft_type = res->ft_type; + memcpy(&mdest_res->sample_idx[0], &res->sample_idx, + sizeof(struct mlx5_flow_sub_actions_idx)); + memcpy(&mdest_res->sample_act[0], &res->sample_act, + sizeof(struct mlx5_flow_sub_actions_list)); + mdest_res->num_of_dest = num_of_dest; + if (flow_dv_dest_array_resource_register(dev, mdest_res, + dev_flow, error)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "can't create sample " + "action"); + } else { + res->sub_actions = sample_actions; + if (flow_dv_sample_resource_register(dev, res, dev_flow, error)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "can't create sample action"); + } + return 0; +} - case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: - case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: - if (flow_dv_convert_action_modify_tcp_ack - (mhdr_res, actions, error)) - return -rte_errno; - action_flags |= actions->type == - RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? - MLX5_FLOW_ACTION_INC_TCP_ACK : - MLX5_FLOW_ACTION_DEC_TCP_ACK; - break; - case MLX5_RTE_FLOW_ACTION_TYPE_TAG: - if (flow_dv_convert_action_set_reg - (mhdr_res, actions, error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_SET_TAG; - break; - case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: - if (flow_dv_convert_action_copy_mreg - (dev, mhdr_res, actions, error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_SET_TAG; - break; - case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: - action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; - dev_flow->handle->fate_action = - MLX5_FLOW_FATE_DEFAULT_MISS; - break; - case RTE_FLOW_ACTION_TYPE_METER: - mtr = actions->conf; - if (!flow->meter) { - fm = mlx5_flow_meter_attach(priv, mtr->mtr_id, - attr, error); - if (!fm) - return rte_flow_error_set(error, - rte_errno, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "meter not found " - "or invalid parameters"); - flow->meter = fm->idx; - } - /* Set the meter action. 
*/ - if (!fm) { - fm = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_MTR], flow->meter); - if (!fm) - return rte_flow_error_set(error, - rte_errno, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "meter not found " - "or invalid parameters"); - } - dev_flow->dv.actions[actions_n++] = - fm->mfts->meter_action; - action_flags |= MLX5_FLOW_ACTION_METER; - break; - case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: - if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res, - actions, error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; - break; - case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: - if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res, - actions, error)) - return -rte_errno; - action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; - break; - case RTE_FLOW_ACTION_TYPE_END: - actions_end = true; - if (mhdr_res->actions_num) { - /* create modify action if needed. */ - if (flow_dv_modify_hdr_resource_register - (dev, mhdr_res, dev_flow, error)) - return -rte_errno; - dev_flow->dv.actions[modify_action_position] = - handle->dvh.modify_hdr->action; - } - if (action_flags & MLX5_FLOW_ACTION_COUNT) { - flow->counter = - flow_dv_translate_create_counter(dev, - dev_flow, count, age); +/** + * Remove an ASO age action from age actions list. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] age + * Pointer to the aso age action handler. + */ +static void +flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev, + struct mlx5_aso_age_action *age) +{ + struct mlx5_age_info *age_info; + struct mlx5_age_param *age_param = &age->age_params; + struct mlx5_priv *priv = dev->data->dev_private; + uint16_t expected = AGE_CANDIDATE; - if (!flow->counter) - return rte_flow_error_set - (error, rte_errno, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "cannot create counter" - " object."); - dev_flow->dv.actions[actions_n++] = - (flow_dv_counter_get_by_idx(dev, - flow->counter, NULL))->action; - } - break; - default: - break; + age_info = GET_PORT_AGE_INFO(priv); + if (!__atomic_compare_exchange_n(&age_param->state, &expected, + AGE_FREE, false, __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { + /** + * We need the lock even it is age timeout, + * since age action may still in process. + */ + rte_spinlock_lock(&age_info->aged_sl); + LIST_REMOVE(age, next); + rte_spinlock_unlock(&age_info->aged_sl); + __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED); + } +} + +/** + * Release an ASO age action. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] age_idx + * Index of ASO age action to release. + * @param[in] flow + * True if the release operation is during flow destroy operation. + * False if the release operation is during action destroy operation. + * + * @return + * 0 when age action was removed, otherwise the number of references. + */ +static int +flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx); + uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED); + + if (!ret) { + flow_dv_aso_age_remove_from_age(dev, age); + rte_spinlock_lock(&mng->free_sl); + LIST_INSERT_HEAD(&mng->free, age, next); + rte_spinlock_unlock(&mng->free_sl); + } + return ret; +} + +/** + * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools. + * + * @param[in] dev + * Pointer to the Ethernet device structure. 
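+ * (Editorial aside, mirrored from the code below: the pools array only
+ * ever grows, old pool pointers are copied over, and the very first
+ * allocation also starts the ASO data-path via mlx5_aso_queue_start().)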
+ * + * @return + * 0 on success, otherwise negative errno value and rte_errno is set. + */ +static int +flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + void *old_pools = mng->pools; + uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE; + uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize; + void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); + + if (!pools) { + rte_errno = ENOMEM; + return -ENOMEM; + } + if (old_pools) { + memcpy(pools, old_pools, + mng->n * sizeof(struct mlx5_flow_counter_pool *)); + mlx5_free(old_pools); + } else { + /* First ASO flow hit allocation - starting ASO data-path. */ + int ret = mlx5_aso_queue_start(priv->sh); + + if (ret) { + mlx5_free(pools); + return ret; } - if (mhdr_res->actions_num && - modify_action_position == UINT32_MAX) - modify_action_position = actions_n++; } - dev_flow->dv.actions_n = actions_n; - dev_flow->act_flags = action_flags; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); - int item_type = items->type; + mng->n = resize; + mng->pools = pools; + return 0; +} - switch (item_type) { - case RTE_FLOW_ITEM_TYPE_PORT_ID: - flow_dv_translate_item_port_id(dev, match_mask, - match_value, items); - last_item = MLX5_FLOW_ITEM_PORT_ID; - break; - case RTE_FLOW_ITEM_TYPE_ETH: - flow_dv_translate_item_eth(match_mask, match_value, - items, tunnel, - dev_flow->dv.group); - matcher.priority = action_flags & - MLX5_FLOW_ACTION_DEFAULT_MISS && - !dev_flow->external ? - MLX5_PRIORITY_MAP_L3 : - MLX5_PRIORITY_MAP_L2; - last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : - MLX5_FLOW_LAYER_OUTER_L2; - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - flow_dv_translate_item_vlan(dev_flow, - match_mask, match_value, - items, tunnel, - dev_flow->dv.group); - matcher.priority = MLX5_PRIORITY_MAP_L2; - last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | - MLX5_FLOW_LAYER_INNER_VLAN) : - (MLX5_FLOW_LAYER_OUTER_L2 | - MLX5_FLOW_LAYER_OUTER_VLAN); - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - mlx5_flow_tunnel_ip_check(items, next_protocol, - &item_flags, &tunnel); - flow_dv_translate_item_ipv4(match_mask, match_value, - items, item_flags, tunnel, - dev_flow->dv.group); - matcher.priority = MLX5_PRIORITY_MAP_L3; - last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : - MLX5_FLOW_LAYER_OUTER_L3_IPV4; - if (items->mask != NULL && - ((const struct rte_flow_item_ipv4 *) - items->mask)->hdr.next_proto_id) { - next_protocol = - ((const struct rte_flow_item_ipv4 *) - (items->spec))->hdr.next_proto_id; - next_protocol &= - ((const struct rte_flow_item_ipv4 *) - (items->mask))->hdr.next_proto_id; - } else { - /* Reset for inner layer. */ - next_protocol = 0xff; - } - break; - case RTE_FLOW_ITEM_TYPE_IPV6: - mlx5_flow_tunnel_ip_check(items, next_protocol, - &item_flags, &tunnel); - flow_dv_translate_item_ipv6(match_mask, match_value, - items, item_flags, tunnel, - dev_flow->dv.group); - matcher.priority = MLX5_PRIORITY_MAP_L3; - last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : - MLX5_FLOW_LAYER_OUTER_L3_IPV6; - if (items->mask != NULL && - ((const struct rte_flow_item_ipv6 *) - items->mask)->hdr.proto) { - next_protocol = - ((const struct rte_flow_item_ipv6 *) - items->spec)->hdr.proto; - next_protocol &= - ((const struct rte_flow_item_ipv6 *) - items->mask)->hdr.proto; - } else { - /* Reset for inner layer. 
*/ - next_protocol = 0xff; +/** + * Create and initialize a new ASO aging pool. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[out] age_free + * Where to put the pointer of a new age action. + * + * @return + * The age actions pool pointer and @p age_free is set on success, + * NULL otherwise and rte_errno is set. + */ +static struct mlx5_aso_age_pool * +flow_dv_age_pool_create(struct rte_eth_dev *dev, + struct mlx5_aso_age_action **age_free) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_pool *pool = NULL; + struct mlx5_devx_obj *obj = NULL; + uint32_t i; + + obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx, + priv->sh->pdn); + if (!obj) { + rte_errno = ENODATA; + DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX."); + return NULL; + } + pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY); + if (!pool) { + claim_zero(mlx5_devx_cmd_destroy(obj)); + rte_errno = ENOMEM; + return NULL; + } + pool->flow_hit_aso_obj = obj; + pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; + rte_spinlock_lock(&mng->resize_sl); + pool->index = mng->next; + /* Resize pools array if there is no room for the new pool in it. */ + if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) { + claim_zero(mlx5_devx_cmd_destroy(obj)); + mlx5_free(pool); + rte_spinlock_unlock(&mng->resize_sl); + return NULL; + } + mng->pools[pool->index] = pool; + mng->next++; + rte_spinlock_unlock(&mng->resize_sl); + /* Assign the first action in the new pool, the rest go to free list. */ + *age_free = &pool->actions[0]; + for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) { + pool->actions[i].offset = i; + LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next); + } + return pool; +} + +/** + * Allocate a ASO aging bit. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * + * @return + * Index to ASO age action on success, 0 otherwise and rte_errno is set. + */ +static uint32_t +flow_dv_aso_age_alloc(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct mlx5_aso_age_pool *pool; + struct mlx5_aso_age_action *age_free = NULL; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + + MLX5_ASSERT(mng); + /* Try to get the next free age action bit. */ + rte_spinlock_lock(&mng->free_sl); + age_free = LIST_FIRST(&mng->free); + if (age_free) { + LIST_REMOVE(age_free, next); + } else if (!flow_dv_age_pool_create(dev, &age_free)) { + rte_spinlock_unlock(&mng->free_sl); + return 0; /* 0 is an error.*/ + } + rte_spinlock_unlock(&mng->free_sl); + pool = container_of + ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL]) + (age_free - age_free->offset), const struct mlx5_aso_age_pool, + actions); + if (!age_free->dr_action) { + age_free->dr_action = mlx5_glue->dr_action_create_flow_hit + (pool->flow_hit_aso_obj->obj, + age_free->offset, REG_C_5); + if (!age_free->dr_action) { + rte_errno = errno; + rte_spinlock_lock(&mng->free_sl); + LIST_INSERT_HEAD(&mng->free, age_free, next); + rte_spinlock_unlock(&mng->free_sl); + return 0; /* 0 is an error.*/ + } + } + __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED); + return pool->index | ((age_free->offset + 1) << 16); +} + +/** + * Create a age action using ASO mechanism. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] age + * Pointer to the aging action configuration. + * + * @return + * Index to flow counter on success, 0 otherwise. 
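+ *
+ * @note Despite the wording above, the value is an ASO *age* action
+ * index, not a counter index: flow_dv_aso_age_alloc() encodes it as
+ * pool->index | ((offset + 1) << 16), keeping 0 free to mean failure.
+ * A hedged decode sketch, assuming pool indexes stay below 2^16
+ * (helper name is illustrative):
+ *
+ * @code
+ * static inline void
+ * aso_age_idx_decode(uint32_t idx, uint32_t *pool, uint32_t *offset)
+ * {
+ *         *pool = idx & 0xffff;       // ASO age pool index
+ *         *offset = (idx >> 16) - 1;  // action offset inside the pool
+ * }
+ * @endcode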
+ */ +static uint32_t +flow_dv_translate_create_aso_age(struct rte_eth_dev *dev, + const struct rte_flow_action_age *age) +{ + uint32_t age_idx = 0; + struct mlx5_aso_age_action *aso_age; + + age_idx = flow_dv_aso_age_alloc(dev); + if (!age_idx) + return 0; + aso_age = flow_aso_age_get_by_idx(dev, age_idx); + aso_age->age_params.context = age->context; + aso_age->age_params.timeout = age->timeout; + aso_age->age_params.port_id = dev->data->port_id; + __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0, + __ATOMIC_RELAXED); + __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE, + __ATOMIC_RELAXED); + return age_idx; +} + +/** + * Fill the flow with DV spec, lock free + * (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in, out] dev_flow + * Pointer to the sub flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_translate(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *dev_conf = &priv->config; + struct rte_flow *flow = dev_flow->flow; + struct mlx5_flow_handle *handle = dev_flow->handle; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; + uint64_t item_flags = 0; + uint64_t last_item = 0; + uint64_t action_flags = 0; + uint64_t priority = attr->priority; + struct mlx5_flow_dv_matcher matcher = { + .mask = { + .size = sizeof(matcher.mask.buf) - + MLX5_ST_SZ_BYTES(fte_match_set_misc4), + }, + }; + int actions_n = 0; + bool actions_end = false; + union { + struct mlx5_flow_dv_modify_hdr_resource res; + uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) + + sizeof(struct mlx5_modification_cmd) * + (MLX5_MAX_MODIFY_NUM + 1)]; + } mhdr_dummy; + struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; + const struct rte_flow_action_count *count = NULL; + const struct rte_flow_action_age *age = NULL; + union flow_dv_attr flow_attr = { .attr = 0 }; + uint32_t tag_be; + union mlx5_flow_tbl_key tbl_key; + uint32_t modify_action_position = UINT32_MAX; + void *match_mask = matcher.mask.buf; + void *match_value = dev_flow->dv.value.buf; + uint8_t next_protocol = 0xff; + struct rte_vlan_hdr vlan = { 0 }; + struct mlx5_flow_dv_dest_array_resource mdest_res; + struct mlx5_flow_dv_sample_resource sample_res; + void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0}; + struct mlx5_flow_sub_actions_list *sample_act; + uint32_t sample_act_pos = UINT32_MAX; + uint32_t num_of_dest = 0; + int tmp_actions_n = 0; + uint32_t table; + int ret = 0; + const struct mlx5_flow_tunnel *tunnel; + struct flow_grp_info grp_info = { + .external = !!dev_flow->external, + .transfer = !!attr->transfer, + .fdb_def_rule = !!priv->fdb_def_rule, + .skip_scale = !!dev_flow->skip_scale, + }; + + if (!wks) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "failed to push flow workspace"); + rss_desc = &wks->rss_desc; + memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource)); + memset(&sample_res, 0, 
sizeof(struct mlx5_flow_dv_sample_resource));
+	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+					   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+	/* Update normal path action resource at the last index of array. */
+	sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
+	tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
+		 flow_items_to_tunnel(items) :
+		 is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
+		 flow_actions_to_tunnel(actions) :
+		 dev_flow->tunnel ? dev_flow->tunnel : NULL;
+	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+				(dev, tunnel, attr, items, actions);
+	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+				       &grp_info, error);
+	if (ret)
+		return ret;
+	dev_flow->dv.group = table;
+	if (attr->transfer)
+		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+	if (priority == MLX5_FLOW_PRIO_RSVD)
+		priority = dev_conf->flow_prio - 1;
+	/* Number of actions must be set to 0 in case of a dirty stack. */
+	mhdr_res->actions_num = 0;
+	if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+		/*
+		 * Do not add a decap action if the match rule drops the
+		 * packet; HW rejects rules combining decap & drop.
+		 *
+		 * If a tunnel match rule was inserted before the matching
+		 * tunnel set rule, the flow table used in the match rule
+		 * must be registered. The current implementation handles
+		 * that in flow_dv_matcher_register() at the function end.
+		 */
+		bool add_decap = true;
+		const struct rte_flow_action *ptr = actions;
+
+		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
+			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
+				add_decap = false;
+				break;
 			}
+		}
+		if (add_decap) {
+			if (flow_dv_create_action_l2_decap(dev, dev_flow,
+							   attr->transfer,
+							   error))
+				return -rte_errno;
+			dev_flow->dv.actions[actions_n++] =
+					dev_flow->dv.encap_decap->action;
+			action_flags |= MLX5_FLOW_ACTION_DECAP;
+		}
+	}
+	for (; !actions_end ; actions++) {
+		const struct rte_flow_action_queue *queue;
+		const struct rte_flow_action_rss *rss;
+		const struct rte_flow_action *action = actions;
+		const uint8_t *rss_key;
+		const struct rte_flow_action_meter *mtr;
+		struct mlx5_flow_tbl_resource *tbl;
+		struct mlx5_aso_age_action *age_act;
+		uint32_t port_id = 0;
+		struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+		int action_type = actions->type;
+		const struct rte_flow_action *found_action = NULL;
+		struct mlx5_flow_meter *fm = NULL;
+		uint32_t jump_group = 0;
+
+		if (!mlx5_flow_os_action_supported(action_type))
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  actions,
+						  "action not supported");
+		switch (action_type) {
+		case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
			break;
-		case RTE_FLOW_ITEM_TYPE_TCP:
-			flow_dv_translate_item_tcp(match_mask, match_value,
-						   items, tunnel);
-			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
+		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
-		case RTE_FLOW_ITEM_TYPE_UDP:
-			flow_dv_translate_item_udp(match_mask, match_value,
-						   items, tunnel);
-			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP : - MLX5_FLOW_LAYER_OUTER_L4_UDP; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + if (flow_dv_translate_action_port_id(dev, action, + &port_id, error)) + return -rte_errno; + port_id_resource.port_id = port_id; + MLX5_ASSERT(!handle->rix_port_id_action); + if (flow_dv_port_id_action_resource_register + (dev, &port_id_resource, dev_flow, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.port_id_action->action; + action_flags |= MLX5_FLOW_ACTION_PORT_ID; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID; + sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID; + num_of_dest++; break; - case RTE_FLOW_ITEM_TYPE_GRE: - flow_dv_translate_item_gre(match_mask, match_value, - items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; - last_item = MLX5_FLOW_LAYER_GRE; - break; - case RTE_FLOW_ITEM_TYPE_GRE_KEY: - flow_dv_translate_item_gre_key(match_mask, - match_value, items); - last_item = MLX5_FLOW_LAYER_GRE_KEY; + case RTE_FLOW_ACTION_TYPE_FLAG: + action_flags |= MLX5_FLOW_ACTION_FLAG; + dev_flow->handle->mark = 1; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + struct rte_flow_action_mark mark = { + .id = MLX5_FLOW_MARK_DEFAULT, + }; + + if (flow_dv_convert_action_mark(dev, &mark, + mhdr_res, + error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_MARK_EXT; + break; + } + tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); + /* + * Only one FLAG or MARK is supported per device flow + * right now. So the pointer to the tag resource must be + * zero before the register process. + */ + MLX5_ASSERT(!handle->dvh.rix_tag); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; + MLX5_ASSERT(dev_flow->dv.tag_resource); + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.tag_resource->action; break; - case RTE_FLOW_ITEM_TYPE_NVGRE: - flow_dv_translate_item_nvgre(match_mask, match_value, - items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; - last_item = MLX5_FLOW_LAYER_GRE; + case RTE_FLOW_ACTION_TYPE_MARK: + action_flags |= MLX5_FLOW_ACTION_MARK; + dev_flow->handle->mark = 1; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + const struct rte_flow_action_mark *mark = + (const struct rte_flow_action_mark *) + actions->conf; + + if (flow_dv_convert_action_mark(dev, mark, + mhdr_res, + error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_MARK_EXT; + break; + } + /* Fall-through */ + case MLX5_RTE_FLOW_ACTION_TYPE_MARK: + /* Legacy (non-extensive) MARK action. */ + tag_be = mlx5_flow_mark_set + (((const struct rte_flow_action_mark *) + (actions->conf))->id); + MLX5_ASSERT(!handle->dvh.rix_tag); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; + MLX5_ASSERT(dev_flow->dv.tag_resource); + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.tag_resource->action; break; - case RTE_FLOW_ITEM_TYPE_VXLAN: - flow_dv_translate_item_vxlan(match_mask, match_value, - items, tunnel); - matcher.priority = rss_desc->level >= 2 ? 
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; - last_item = MLX5_FLOW_LAYER_VXLAN; + case RTE_FLOW_ACTION_TYPE_SET_META: + if (flow_dv_convert_action_set_meta + (dev, mhdr_res, attr, + (const struct rte_flow_action_set_meta *) + actions->conf, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_META; break; - case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: - flow_dv_translate_item_vxlan_gpe(match_mask, - match_value, items, - tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; - last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + case RTE_FLOW_ACTION_TYPE_SET_TAG: + if (flow_dv_convert_action_set_tag + (dev, mhdr_res, + (const struct rte_flow_action_set_tag *) + actions->conf, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TAG; break; - case RTE_FLOW_ITEM_TYPE_GENEVE: - flow_dv_translate_item_geneve(match_mask, match_value, - items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; - last_item = MLX5_FLOW_LAYER_GENEVE; + case RTE_FLOW_ACTION_TYPE_DROP: + action_flags |= MLX5_FLOW_ACTION_DROP; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; break; - case RTE_FLOW_ITEM_TYPE_MPLS: - flow_dv_translate_item_mpls(match_mask, match_value, - items, last_item, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; - last_item = MLX5_FLOW_LAYER_MPLS; + case RTE_FLOW_ACTION_TYPE_QUEUE: + queue = actions->conf; + rss_desc->queue_num = 1; + rss_desc->queue[0] = queue->index; + action_flags |= MLX5_FLOW_ACTION_QUEUE; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE; + num_of_dest++; break; - case RTE_FLOW_ITEM_TYPE_MARK: - flow_dv_translate_item_mark(dev, match_mask, - match_value, items); - last_item = MLX5_FLOW_ITEM_MARK; + case RTE_FLOW_ACTION_TYPE_RSS: + rss = actions->conf; + memcpy(rss_desc->queue, rss->queue, + rss->queue_num * sizeof(uint16_t)); + rss_desc->queue_num = rss->queue_num; + /* NULL RSS key indicates default RSS key. */ + rss_key = !rss->key ? rss_hash_default_key : rss->key; + memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); + /* + * rss->level and rss.types should be set in advance + * when expanding items for RSS. + */ + action_flags |= MLX5_FLOW_ACTION_RSS; + dev_flow->handle->fate_action = rss_desc->shared_rss ? 
+ MLX5_FLOW_FATE_SHARED_RSS : + MLX5_FLOW_FATE_QUEUE; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_AGE: + flow->age = (uint32_t)(uintptr_t)(action->conf); + age_act = flow_aso_age_get_by_idx(dev, flow->age); + __atomic_fetch_add(&age_act->refcnt, 1, + __ATOMIC_RELAXED); + dev_flow->dv.actions[actions_n++] = age_act->dr_action; + action_flags |= MLX5_FLOW_ACTION_AGE; break; - case RTE_FLOW_ITEM_TYPE_META: - flow_dv_translate_item_meta(dev, match_mask, - match_value, attr, items); - last_item = MLX5_FLOW_ITEM_METADATA; + case RTE_FLOW_ACTION_TYPE_AGE: + if (priv->sh->flow_hit_aso_en && attr->group) { + flow->age = flow_dv_translate_create_aso_age + (dev, action->conf); + if (!flow->age) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "can't create ASO age action"); + dev_flow->dv.actions[actions_n++] = + (flow_aso_age_get_by_idx + (dev, flow->age))->dr_action; + action_flags |= MLX5_FLOW_ACTION_AGE; + break; + } + /* Fall-through */ + case RTE_FLOW_ACTION_TYPE_COUNT: + if (!dev_conf->devx) { + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "count action not supported"); + } + /* Save information first, will apply later. */ + if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) + count = action->conf; + else + age = action->conf; + action_flags |= MLX5_FLOW_ACTION_COUNT; break; - case RTE_FLOW_ITEM_TYPE_ICMP: - flow_dv_translate_item_icmp(match_mask, match_value, - items, tunnel); - last_item = MLX5_FLOW_LAYER_ICMP; + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: + dev_flow->dv.actions[actions_n++] = + priv->sh->pop_vlan_action; + action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; break; - case RTE_FLOW_ITEM_TYPE_ICMP6: - flow_dv_translate_item_icmp6(match_mask, match_value, - items, tunnel); - last_item = MLX5_FLOW_LAYER_ICMP6; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + if (!(action_flags & + MLX5_FLOW_ACTION_OF_SET_VLAN_VID)) + flow_dev_get_vlan_info_from_items(items, &vlan); + vlan.eth_proto = rte_be_to_cpu_16 + ((((const struct rte_flow_action_of_push_vlan *) + actions->conf)->ethertype)); + found_action = mlx5_flow_find_action + (actions + 1, + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID); + if (found_action) + mlx5_update_vlan_vid_pcp(found_action, &vlan); + found_action = mlx5_flow_find_action + (actions + 1, + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP); + if (found_action) + mlx5_update_vlan_vid_pcp(found_action, &vlan); + if (flow_dv_create_action_push_vlan + (dev, attr, &vlan, dev_flow, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.push_vlan_res->action; + action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; break; - case RTE_FLOW_ITEM_TYPE_TAG: - flow_dv_translate_item_tag(dev, match_mask, - match_value, items); - last_item = MLX5_FLOW_ITEM_TAG; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + /* of_vlan_push action handled this action */ + MLX5_ASSERT(action_flags & + MLX5_FLOW_ACTION_OF_PUSH_VLAN); break; - case MLX5_RTE_FLOW_ITEM_TYPE_TAG: - flow_dv_translate_mlx5_item_tag(dev, match_mask, - match_value, items); - last_item = MLX5_FLOW_ITEM_TAG; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) + break; + flow_dev_get_vlan_info_from_items(items, &vlan); + mlx5_update_vlan_vid_pcp(actions, &vlan); + /* If no VLAN push - this is a modify header action */ + if (flow_dv_convert_action_modify_vlan_vid + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; break; - case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: - 
flow_dv_translate_item_tx_queue(dev, match_mask, - match_value, - items); - last_item = MLX5_FLOW_ITEM_TX_QUEUE; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + if (flow_dv_create_action_l2_encap(dev, actions, + dev_flow, + attr->transfer, + error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->action; + action_flags |= MLX5_FLOW_ACTION_ENCAP; + if (action_flags & MLX5_FLOW_ACTION_SAMPLE) + sample_act->action_flags |= + MLX5_FLOW_ACTION_ENCAP; break; - case RTE_FLOW_ITEM_TYPE_GTP: - flow_dv_translate_item_gtp(match_mask, match_value, - items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; - last_item = MLX5_FLOW_LAYER_GTP; + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: + if (flow_dv_create_action_l2_decap(dev, dev_flow, + attr->transfer, + error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->action; + action_flags |= MLX5_FLOW_ACTION_DECAP; break; - default: + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + /* Handle encap with preceding decap. */ + if (action_flags & MLX5_FLOW_ACTION_DECAP) { + if (flow_dv_create_action_raw_encap + (dev, actions, dev_flow, attr, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->action; + } else { + /* Handle encap without preceding decap. */ + if (flow_dv_create_action_l2_encap + (dev, actions, dev_flow, attr->transfer, + error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->action; + } + action_flags |= MLX5_FLOW_ACTION_ENCAP; + if (action_flags & MLX5_FLOW_ACTION_SAMPLE) + sample_act->action_flags |= + MLX5_FLOW_ACTION_ENCAP; break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID) + ; + if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { + if (flow_dv_create_action_l2_decap + (dev, dev_flow, attr->transfer, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->action; + } + /* If decap is followed by encap, handle it at encap. */ + action_flags |= MLX5_FLOW_ACTION_DECAP; + break; + case RTE_FLOW_ACTION_TYPE_JUMP: + jump_group = ((const struct rte_flow_action_jump *) + action->conf)->group; + grp_info.std_tbl_fix = 0; + grp_info.skip_scale = 0; + ret = mlx5_flow_group_to_table(dev, tunnel, + jump_group, + &table, + &grp_info, error); + if (ret) + return ret; + tbl = flow_dv_tbl_resource_get(dev, table, attr->egress, + attr->transfer, + !!dev_flow->external, + tunnel, jump_group, 0, + error); + if (!tbl) + return rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create jump action."); + if (flow_dv_jump_tbl_resource_register + (dev, tbl, dev_flow, error)) { + flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); + return rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create jump action."); + } + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.jump->action; + action_flags |= MLX5_FLOW_ACTION_JUMP; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP; + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + if (flow_dv_convert_action_modify_mac + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 
+ MLX5_FLOW_ACTION_SET_MAC_SRC : + MLX5_FLOW_ACTION_SET_MAC_DST; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + if (flow_dv_convert_action_modify_ipv4 + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? + MLX5_FLOW_ACTION_SET_IPV4_SRC : + MLX5_FLOW_ACTION_SET_IPV4_DST; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + if (flow_dv_convert_action_modify_ipv6 + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? + MLX5_FLOW_ACTION_SET_IPV6_SRC : + MLX5_FLOW_ACTION_SET_IPV6_DST; + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + if (flow_dv_convert_action_modify_tp + (mhdr_res, actions, items, + &flow_attr, dev_flow, !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? + MLX5_FLOW_ACTION_SET_TP_SRC : + MLX5_FLOW_ACTION_SET_TP_DST; + break; + case RTE_FLOW_ACTION_TYPE_DEC_TTL: + if (flow_dv_convert_action_modify_dec_ttl + (mhdr_res, items, &flow_attr, dev_flow, + !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_DEC_TTL; + break; + case RTE_FLOW_ACTION_TYPE_SET_TTL: + if (flow_dv_convert_action_modify_ttl + (mhdr_res, actions, items, &flow_attr, + dev_flow, !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TTL; + break; + case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: + if (flow_dv_convert_action_modify_tcp_seq + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? + MLX5_FLOW_ACTION_INC_TCP_SEQ : + MLX5_FLOW_ACTION_DEC_TCP_SEQ; + break; + + case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: + if (flow_dv_convert_action_modify_tcp_ack + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? + MLX5_FLOW_ACTION_INC_TCP_ACK : + MLX5_FLOW_ACTION_DEC_TCP_ACK; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_TAG: + if (flow_dv_convert_action_set_reg + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TAG; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: + if (flow_dv_convert_action_copy_mreg + (dev, mhdr_res, actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TAG; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: + action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; + dev_flow->handle->fate_action = + MLX5_FLOW_FATE_DEFAULT_MISS; + break; + case RTE_FLOW_ACTION_TYPE_METER: + mtr = actions->conf; + if (!flow->meter) { + fm = mlx5_flow_meter_attach(priv, mtr->mtr_id, + attr, error); + if (!fm) + return rte_flow_error_set(error, + rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "meter not found " + "or invalid parameters"); + flow->meter = fm->idx; + } + /* Set the meter action. 
*/
+			if (!fm) {
+				fm = mlx5_ipool_get(priv->sh->ipool
+						[MLX5_IPOOL_MTR], flow->meter);
+				if (!fm)
+					return rte_flow_error_set(error,
+						rte_errno,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"meter not found "
+						"or invalid parameters");
+			}
+			dev_flow->dv.actions[actions_n++] =
+				fm->mfts->meter_action;
+			action_flags |= MLX5_FLOW_ACTION_METER;
+			break;
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+			if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
+							      actions, error))
+				return -rte_errno;
+			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
+			break;
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+			if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
+							      actions, error))
+				return -rte_errno;
+			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+			break;
+		case RTE_FLOW_ACTION_TYPE_SAMPLE:
+			sample_act_pos = actions_n;
+			ret = flow_dv_translate_action_sample(dev,
+							      actions,
+							      dev_flow, attr,
+							      &num_of_dest,
+							      sample_actions,
+							      &sample_res,
+							      error);
+			if (ret < 0)
+				return ret;
+			actions_n++;
+			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+			/* Put encap action into group when used with port id. */
+			if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+			    (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+				sample_act->action_flags |=
+							MLX5_FLOW_ACTION_ENCAP;
+			break;
+		case RTE_FLOW_ACTION_TYPE_END:
+			actions_end = true;
+			if (mhdr_res->actions_num) {
+				/* Create modify header action if needed. */
+				if (flow_dv_modify_hdr_resource_register
+					(dev, mhdr_res, dev_flow, error))
+					return -rte_errno;
+				dev_flow->dv.actions[modify_action_position] =
+					handle->dvh.modify_hdr->action;
+			}
+			if (action_flags & MLX5_FLOW_ACTION_COUNT) {
+				flow->counter =
+					flow_dv_translate_create_counter(dev,
+						dev_flow, count, age);
+
+				if (!flow->counter)
+					return rte_flow_error_set
+						(error, rte_errno,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"cannot create counter"
+						" object.");
+				dev_flow->dv.actions[actions_n] =
+					(flow_dv_counter_get_by_idx(dev,
+					flow->counter, NULL))->action;
+				actions_n++;
+			}
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
+				ret = flow_dv_create_action_sample(dev,
+							  dev_flow,
+							  num_of_dest,
+							  &sample_res,
+							  &mdest_res,
+							  sample_actions,
+							  action_flags,
+							  error);
+				if (ret < 0)
+					return rte_flow_error_set
+						(error, rte_errno,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"cannot create sample action");
+				if (num_of_dest > 1) {
+					dev_flow->dv.actions[sample_act_pos] =
+					dev_flow->dv.dest_array_res->action;
+				} else {
+					dev_flow->dv.actions[sample_act_pos] =
+					dev_flow->dv.sample_res->verbs_action;
+				}
+			}
+			break;
+		default:
+			break;
+		}
+		if (mhdr_res->actions_num &&
+		    modify_action_position == UINT32_MAX)
+			modify_action_position = actions_n++;
+	}
+	/*
+	 * For multiple destinations (sample action with ratio=1), the encap
+	 * action and the port id action are combined into a group action,
+	 * so the original actions must be removed from the flow and only
+	 * the sample action is used instead.
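+	 *
+	 * The loop below therefore copies every action that is not already
+	 * embedded in the sample resource into a temporary array and then
+	 * overwrites dev_flow->dv.actions with the filtered list.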
+ */ + if (num_of_dest > 1 && sample_act->dr_port_id_action) { + int i; + void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0}; + + for (i = 0; i < actions_n; i++) { + if ((sample_act->dr_encap_action && + sample_act->dr_encap_action == + dev_flow->dv.actions[i]) || + (sample_act->dr_port_id_action && + sample_act->dr_port_id_action == + dev_flow->dv.actions[i])) + continue; + temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i]; + } + memcpy((void *)dev_flow->dv.actions, + (void *)temp_actions, + tmp_actions_n * sizeof(void *)); + actions_n = tmp_actions_n; + } + dev_flow->dv.actions_n = actions_n; + dev_flow->act_flags = action_flags; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int item_type = items->type; + + if (!mlx5_flow_os_item_supported(item_type)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "item not supported"); + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_PORT_ID: + flow_dv_translate_item_port_id(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_PORT_ID; + break; + case RTE_FLOW_ITEM_TYPE_ETH: + flow_dv_translate_item_eth(match_mask, match_value, + items, tunnel, + dev_flow->dv.group); + matcher.priority = action_flags & + MLX5_FLOW_ACTION_DEFAULT_MISS && + !dev_flow->external ? + MLX5_PRIORITY_MAP_L3 : + MLX5_PRIORITY_MAP_L2; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + flow_dv_translate_item_vlan(dev_flow, + match_mask, match_value, + items, tunnel, + dev_flow->dv.group); + matcher.priority = MLX5_PRIORITY_MAP_L2; + last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | + MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + mlx5_flow_tunnel_ip_check(items, next_protocol, + &item_flags, &tunnel); + flow_dv_translate_item_ipv4(match_mask, match_value, + items, tunnel, + dev_flow->dv.group); + matcher.priority = MLX5_PRIORITY_MAP_L3; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv4 *) + items->mask)->hdr.next_proto_id) { + next_protocol = + ((const struct rte_flow_item_ipv4 *) + (items->spec))->hdr.next_proto_id; + next_protocol &= + ((const struct rte_flow_item_ipv4 *) + (items->mask))->hdr.next_proto_id; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + mlx5_flow_tunnel_ip_check(items, next_protocol, + &item_flags, &tunnel); + flow_dv_translate_item_ipv6(match_mask, match_value, + items, tunnel, + dev_flow->dv.group); + matcher.priority = MLX5_PRIORITY_MAP_L3; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto) { + next_protocol = + ((const struct rte_flow_item_ipv6 *) + items->spec)->hdr.proto; + next_protocol &= + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: + flow_dv_translate_item_ipv6_frag_ext(match_mask, + match_value, + items, tunnel); + last_item = tunnel ? 
+ MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : + MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6_frag_ext *) + items->mask)->hdr.next_header) { + next_protocol = + ((const struct rte_flow_item_ipv6_frag_ext *) + items->spec)->hdr.next_header; + next_protocol &= + ((const struct rte_flow_item_ipv6_frag_ext *) + items->mask)->hdr.next_header; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_TCP: + flow_dv_translate_item_tcp(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_PRIORITY_MAP_L4; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + flow_dv_translate_item_udp(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_PRIORITY_MAP_L4; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + flow_dv_translate_item_gre(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_GRE_KEY: + flow_dv_translate_item_gre_key(match_mask, + match_value, items); + last_item = MLX5_FLOW_LAYER_GRE_KEY; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + flow_dv_translate_item_nvgre(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + flow_dv_translate_item_vxlan(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); + last_item = MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + flow_dv_translate_item_vxlan_gpe(match_mask, + match_value, items, + tunnel); + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); + last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + flow_dv_translate_item_geneve(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); + last_item = MLX5_FLOW_LAYER_GENEVE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + flow_dv_translate_item_mpls(match_mask, match_value, + items, last_item, tunnel); + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); + last_item = MLX5_FLOW_LAYER_MPLS; + break; + case RTE_FLOW_ITEM_TYPE_MARK: + flow_dv_translate_item_mark(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_MARK; + break; + case RTE_FLOW_ITEM_TYPE_META: + flow_dv_translate_item_meta(dev, match_mask, + match_value, attr, items); + last_item = MLX5_FLOW_ITEM_METADATA; + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + flow_dv_translate_item_icmp(match_mask, match_value, + items, tunnel); + last_item = MLX5_FLOW_LAYER_ICMP; + break; + case RTE_FLOW_ITEM_TYPE_ICMP6: + flow_dv_translate_item_icmp6(match_mask, match_value, + items, tunnel); + last_item = MLX5_FLOW_LAYER_ICMP6; + break; + case RTE_FLOW_ITEM_TYPE_TAG: + flow_dv_translate_item_tag(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_TAG; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TAG: + flow_dv_translate_mlx5_item_tag(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_TAG; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: + flow_dv_translate_item_tx_queue(dev, match_mask, + match_value, + items); + last_item = MLX5_FLOW_ITEM_TX_QUEUE; + break; + case RTE_FLOW_ITEM_TYPE_GTP: + flow_dv_translate_item_gtp(match_mask, match_value, + items, tunnel); + matcher.priority = 
MLX5_TUNNEL_PRIO_GET(rss_desc);
+			last_item = MLX5_FLOW_LAYER_GTP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ECPRI:
+			if (!mlx5_flex_parser_ecpri_exist(dev)) {
+				/* Create it only the first time it is used. */
+				ret = mlx5_flex_parser_ecpri_alloc(dev);
+				if (ret)
+					return rte_flow_error_set
+						(error, -ret,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						NULL,
+						"cannot create eCPRI parser");
+			}
+			/* Adjust the length matcher and device flow value. */
+			matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
+			dev_flow->dv.value.size =
+					MLX5_ST_SZ_BYTES(fte_match_param);
+			flow_dv_translate_item_ecpri(dev, match_mask,
+						     match_value, items);
+			/* No other protocol should follow eCPRI layer. */
+			last_item = MLX5_FLOW_LAYER_ECPRI;
+			break;
+		default:
+			break;
+		}
+		item_flags |= last_item;
+	}
+	/*
+	 * When E-Switch mode is enabled, we have two cases where we need to
+	 * set the source port manually.
+	 * The first one is the case of a NIC steering rule, and the second
+	 * is an E-Switch rule where no port_id item was found. In both cases
+	 * the source port is set according to the current port in use.
+	 */
+	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
+	    (priv->representor || priv->master)) {
+		if (flow_dv_translate_item_port_id(dev, match_mask,
+						   match_value, NULL))
+			return -rte_errno;
+	}
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
+					      dev_flow->dv.value.buf));
+#endif
+	/*
+	 * Layers may be already initialized from prefix flow if this dev_flow
+	 * is the suffix flow.
+	 */
+	handle->layers |= item_flags;
+	if (action_flags & MLX5_FLOW_ACTION_RSS)
+		flow_dv_hashfields_set(dev_flow, rss_desc);
+	/* Register matcher. */
+	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+				    matcher.mask.size);
+	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
+						     matcher.priority);
+	/* The reserved field does not need to be set to 0 here. */
+	tbl_key.domain = attr->transfer;
+	tbl_key.direction = attr->egress;
+	tbl_key.table_id = dev_flow->dv.group;
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
+		return -rte_errno;
+	return 0;
+}
+
+/**
+ * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
+ *
+ * @param[in, out] action
+ *   Shared RSS action holding hash RX queue objects.
+ * @param[in] hash_fields
+ *   Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ *   Tunnel type.
+ * @param[in] hrxq_idx
+ *   Hash RX queue index to set.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
+			      const uint64_t hash_fields,
+			      const int tunnel,
+			      uint32_t hrxq_idx)
+{
+	uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+
+	switch (hash_fields & ~IBV_RX_HASH_INNER) {
+	case MLX5_RSS_HASH_IPV4:
+		hrxqs[0] = hrxq_idx;
+		return 0;
+	case MLX5_RSS_HASH_IPV4_TCP:
+		hrxqs[1] = hrxq_idx;
+		return 0;
+	case MLX5_RSS_HASH_IPV4_UDP:
+		hrxqs[2] = hrxq_idx;
+		return 0;
+	case MLX5_RSS_HASH_IPV6:
+		hrxqs[3] = hrxq_idx;
+		return 0;
+	case MLX5_RSS_HASH_IPV6_TCP:
+		hrxqs[4] = hrxq_idx;
+		return 0;
+	case MLX5_RSS_HASH_IPV6_UDP:
+		hrxqs[5] = hrxq_idx;
+		return 0;
+	case MLX5_RSS_HASH_NONE:
+		hrxqs[6] = hrxq_idx;
+		return 0;
+	default:
+		return -1;
+	}
+}
+
+/**
+ * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   Shared RSS action ID holding hash RX queue objects.
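+ *   (an index into the MLX5_IPOOL_RSS_SHARED_ACTIONS ipool).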
+ * @param[in] hash_fields
+ *   Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ *   Tunnel type.
+ *
+ * @return
+ *   Valid hash RX queue index, otherwise 0.
+ */
+static uint32_t
+__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
+				 const uint64_t hash_fields,
+				 const int tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss =
+	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+	const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
+					 shared_rss->hrxq_tunnel;
+
+	switch (hash_fields & ~IBV_RX_HASH_INNER) {
+	case MLX5_RSS_HASH_IPV4:
+		return hrxqs[0];
+	case MLX5_RSS_HASH_IPV4_TCP:
+		return hrxqs[1];
+	case MLX5_RSS_HASH_IPV4_UDP:
+		return hrxqs[2];
+	case MLX5_RSS_HASH_IPV6:
+		return hrxqs[3];
+	case MLX5_RSS_HASH_IPV6_TCP:
+		return hrxqs[4];
+	case MLX5_RSS_HASH_IPV6_UDP:
+		return hrxqs[5];
+	case MLX5_RSS_HASH_NONE:
+		return hrxqs[6];
+	default:
+		return 0;
+	}
+}
+
+/**
+ * Retrieve the hash RX queue suitable for the *flow*.
+ * If a shared action is configured for the *flow*, the suitable hash RX
+ * queue is retrieved from the attached shared action.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] dev_flow
+ *   Pointer to the sub flow.
+ * @param[in] rss_desc
+ *   Pointer to the RSS descriptor.
+ * @param[out] hrxq
+ *   Pointer to the retrieved hash RX queue object.
+ *
+ * @return
+ *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
+ */
+static uint32_t
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+		       struct mlx5_flow_rss_desc *rss_desc,
+		       struct mlx5_hrxq **hrxq)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t hrxq_idx;
+
+	if (rss_desc->shared_rss) {
+		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
+				(dev, rss_desc->shared_rss,
+				 dev_flow->hash_fields,
+				 !!(dev_flow->handle->layers &
+				    MLX5_FLOW_LAYER_TUNNEL));
+		if (hrxq_idx)
+			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+					       hrxq_idx);
+	} else {
+		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+					     &hrxq_idx);
+	}
+	return hrxq_idx;
+}
+
+/**
+ * Apply the flow to the NIC, lock free
+ * (mutex should be acquired by the caller).
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
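+ *
+ * On failure, every hash RX queue and VF VLAN resource acquired so far
+ * is released again on the error path before returning.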
+ */ +static int +flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct mlx5_flow_dv_workspace *dv; + struct mlx5_flow_handle *dh; + struct mlx5_flow_handle_dv *dv_h; + struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + int n; + int err; + int idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc; + + MLX5_ASSERT(wks); + if (rss_desc->shared_rss) { + dh = wks->flows[wks->flow_idx - 1].handle; + MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS); + dh->rix_srss = rss_desc->shared_rss; + } + for (idx = wks->flow_idx - 1; idx >= 0; idx--) { + dev_flow = &wks->flows[idx]; + dv = &dev_flow->dv; + dh = dev_flow->handle; + dv_h = &dh->dvh; + n = dv->actions_n; + if (dh->fate_action == MLX5_FLOW_FATE_DROP) { + if (dv->transfer) { + dv->actions[n++] = priv->sh->esw_drop_action; + } else { + MLX5_ASSERT(priv->drop_queue.hrxq); + dv->actions[n++] = + priv->drop_queue.hrxq->action; + } + } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE && + !dv_h->rix_sample && !dv_h->rix_dest_array) || + (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) { + struct mlx5_hrxq *hrxq = NULL; + uint32_t hrxq_idx = __flow_dv_rss_get_hrxq + (dev, dev_flow, rss_desc, &hrxq); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot get hash queue"); + goto error; + } + if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) + dh->rix_hrxq = hrxq_idx; + dv->actions[n++] = hrxq->action; + } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { + if (!priv->sh->default_miss_action) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "default miss action not be created."); + goto error; + } + dv->actions[n++] = priv->sh->default_miss_action; } - item_flags |= last_item; + err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, + (void *)&dv->value, n, + dv->actions, &dh->drv_flow); + if (err) { + rte_flow_error_set(error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "hardware refuses to create flow"); + goto error; + } + if (priv->vmwa_context && + dh->vf_vlan.tag && !dh->vf_vlan.created) { + /* + * The rule contains the VLAN pattern. + * For VF we are going to create VLAN + * interface to make hypervisor set correct + * e-Switch vport context. + */ + mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); + } + } + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dh, next) { + /* hrxq is union, don't clear it if the flag is not set. */ + if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { + mlx5_hrxq_release(dev, dh->rix_hrxq); + dh->rix_hrxq = 0; + } + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); + } + if (rss_desc->shared_rss) + wks->flows[wks->flow_idx - 1].handle->rix_srss = 0; + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; +} + +void +flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry) +{ + struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache), + entry); + + claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object)); + mlx5_free(cache); +} + +/** + * Release the flow matcher. + * + * @param dev + * Pointer to Ethernet device. + * @param handle + * Pointer to mlx5_flow_handle. 
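+ *   Releasing the matcher also drops the reference it holds on its
+ *   flow table.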
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_matcher_release(struct rte_eth_dev *dev,
+			struct mlx5_flow_handle *handle)
+{
+	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
+	struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
+							    typeof(*tbl), tbl);
+	int ret;
+
+	MLX5_ASSERT(matcher->matcher_object);
+	ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
+	flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
+	return ret;
+}
+
+/**
+ * Release encap_decap resource.
+ *
+ * @param list
+ *   Pointer to the hash list.
+ * @param entry
+ *   Pointer to the existing resource entry object.
+ */
+void
+flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
+			      struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_dv_encap_decap_resource *res =
+		container_of(entry, typeof(*res), entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
+}
+
+/**
+ * Release an encap/decap resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param encap_decap_idx
+ *   Index of encap decap resource.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
+				     uint32_t encap_decap_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+
+	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+					encap_decap_idx);
+	if (!cache_resource)
+		return 0;
+	MLX5_ASSERT(cache_resource->action);
+	return mlx5_hlist_unregister(priv->sh->encaps_decaps,
+				     &cache_resource->entry);
+}
+
+/**
+ * Release a jump-to-table action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+				  struct mlx5_flow_handle *handle)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tbl_data_entry *tbl_data;
+
+	tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
+				  handle->rix_jump);
+	if (!tbl_data)
+		return 0;
+	return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
+}
+
+void
+flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
+			 struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_flow_dv_modify_hdr_resource *res =
+		container_of(entry, typeof(*res), entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
+	mlx5_free(entry);
+}
+
+/**
+ * Release a modify-header resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
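+ *
+ * The resource is reference counted through the modify_cmds hash list;
+ * flow_dv_modify_remove_cb() destroys it once the last reference is
+ * unregistered.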
+ */
+static int
+flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
+				    struct mlx5_flow_handle *handle)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
+
+	MLX5_ASSERT(entry->action);
+	return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
+}
+
+void
+flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
+			  struct mlx5_cache_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_dv_port_id_action_resource *cache =
+			container_of(entry, typeof(*cache), entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
+}
+
+/**
+ * Release port ID action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param port_id
+ *   Port ID action resource index.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
+					uint32_t port_id)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_port_id_action_resource *cache;
+
+	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
+	if (!cache)
+		return 0;
+	MLX5_ASSERT(cache->action);
+	return mlx5_cache_unregister(&priv->sh->port_id_action_list,
+				     &cache->entry);
+}
+
+/**
+ * Release shared RSS action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param srss
+ *   Shared RSS action index.
+ */
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss;
+
+	shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
+	__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+}
+
+void
+flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
+			    struct mlx5_cache_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_dv_push_vlan_action_resource *cache =
+			container_of(entry, typeof(*cache), entry);
+
+	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
+}
+
+/**
+ * Release push VLAN action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
+					  struct mlx5_flow_handle *handle)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_push_vlan_action_resource *cache;
+	uint32_t idx = handle->dvh.rix_push_vlan;
+
+	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
+	if (!cache)
+		return 0;
+	MLX5_ASSERT(cache->action);
+	return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
+				     &cache->entry);
+}
+
+/**
+ * Release the fate resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
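+ *   Only the resource selected by handle->fate_action is released;
+ *   rix_fate is a union shared by all fate types.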
+ */
+static void
+flow_dv_fate_resource_release(struct rte_eth_dev *dev,
+			       struct mlx5_flow_handle *handle)
+{
+	if (!handle->rix_fate)
+		return;
+	switch (handle->fate_action) {
+	case MLX5_FLOW_FATE_QUEUE:
+		mlx5_hrxq_release(dev, handle->rix_hrxq);
+		break;
+	case MLX5_FLOW_FATE_JUMP:
+		flow_dv_jump_tbl_resource_release(dev, handle);
+		break;
+	case MLX5_FLOW_FATE_PORT_ID:
+		flow_dv_port_id_action_resource_release(dev,
+				handle->rix_port_id_action);
+		break;
+	case MLX5_FLOW_FATE_SHARED_RSS:
+		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
+		break;
+	default:
+		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
+		break;
 	}
-	/*
-	 * When E-Switch mode is enabled, we have two cases where we need to
-	 * set the source port manually.
-	 * The first one, is in case of Nic steering rule, and the second is
-	 * E-Switch rule where no port_id item was found. In both cases
-	 * the source port is set according the current port in use.
-	 */
-	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
-	    (priv->representor || priv->master)) {
-		if (flow_dv_translate_item_port_id(dev, match_mask,
-						   match_value, NULL))
-			return -rte_errno;
+	handle->rix_fate = 0;
+}
+
+void
+flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
+			 struct mlx5_cache_entry *entry)
+{
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_sample_resource *cache_resource =
+			container_of(entry, typeof(*cache_resource), entry);
+
+	if (cache_resource->verbs_action)
+		claim_zero(mlx5_glue->destroy_flow_action
+				(cache_resource->verbs_action));
+	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+		if (cache_resource->default_miss)
+			claim_zero(mlx5_glue->destroy_flow_action
+				  (cache_resource->default_miss));
 	}
-#ifdef RTE_LIBRTE_MLX5_DEBUG
-	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
-					      dev_flow->dv.value.buf));
-#endif
-	/*
-	 * Layers may be already initialized from prefix flow if this dev_flow
-	 * is the suffix flow.
-	 */
-	handle->layers |= item_flags;
-	if (action_flags & MLX5_FLOW_ACTION_RSS)
-		flow_dv_hashfields_set(dev_flow, rss_desc);
-	/* Register matcher. */
-	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
-				    matcher.mask.size);
-	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
-						     matcher.priority);
-	/* reserved field no needs to be set to 0 here. */
-	tbl_key.domain = attr->transfer;
-	tbl_key.direction = attr->egress;
-	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
-		return -rte_errno;
-	return 0;
+	if (cache_resource->normal_path_tbl)
+		flow_dv_tbl_resource_release(MLX5_SH(dev),
+			cache_resource->normal_path_tbl);
+	flow_dv_sample_sub_actions_release(dev,
+					   &cache_resource->sample_idx);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+			cache_resource->idx);
+	DRV_LOG(DEBUG, "sample resource %p: removed",
+		(void *)cache_resource);
 }

 /**
- * Apply the flow to the NIC, lock free,
- * (mutex should be acquired by caller).
+ * Release a sample resource.
  *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[out] error
- *   Pointer to error structure.
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   1 while a reference on it exists, 0 when freed.
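+ *
+ * The sample resource is reference counted through the shared
+ * sample_action_list cache; flow_dv_sample_remove_cb() performs the
+ * actual destruction once the last reference is unregistered.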
 */
 static int
-__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
-	      struct rte_flow_error *error)
+flow_dv_sample_resource_release(struct rte_eth_dev *dev,
+				struct mlx5_flow_handle *handle)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_sample_resource *cache_resource;
+
+	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+					handle->dvh.rix_sample);
+	if (!cache_resource)
+		return 0;
+	MLX5_ASSERT(cache_resource->verbs_action);
+	return mlx5_cache_unregister(&priv->sh->sample_action_list,
+				     &cache_resource->entry);
+}
+
+void
+flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
+			     struct mlx5_cache_entry *entry)
+{
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_dest_array_resource *cache_resource =
+			container_of(entry, typeof(*cache_resource), entry);
+	uint32_t i = 0;
+
+	MLX5_ASSERT(cache_resource->action);
+	if (cache_resource->action)
+		claim_zero(mlx5_glue->destroy_flow_action
+					(cache_resource->action));
+	for (; i < cache_resource->num_of_dest; i++)
+		flow_dv_sample_sub_actions_release(dev,
+				&cache_resource->sample_idx[i]);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+			cache_resource->idx);
+	DRV_LOG(DEBUG, "destination array resource %p: removed",
+		(void *)cache_resource);
+}
+
+/**
+ * Release a destination array resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
+				    struct mlx5_flow_handle *handle)
 {
-	struct mlx5_flow_dv_workspace *dv;
-	struct mlx5_flow_handle *dh;
-	struct mlx5_flow_handle_dv *dv_h;
-	struct mlx5_flow *dev_flow;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_dest_array_resource *cache;
+
+	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+			       handle->dvh.rix_dest_array);
+	if (!cache)
+		return 0;
+	MLX5_ASSERT(cache->action);
+	return mlx5_cache_unregister(&priv->sh->dest_array_list,
+				     &cache->entry);
+}
+
+/**
+ * Remove the flow from the NIC but keep it in memory.
+ * Lock free (mutex should be acquired by the caller).
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ */
+static void
+flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	struct mlx5_flow_handle *dh;
 	uint32_t handle_idx;
-	int n;
-	int err;
-	int idx;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
-	for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
-		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
-		dv = &dev_flow->dv;
-		dh = dev_flow->handle;
-		dv_h = &dh->dvh;
-		n = dv->actions_n;
-		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
-			if (dv->transfer) {
-				dv->actions[n++] = priv->sh->esw_drop_action;
-			} else {
-				struct mlx5_hrxq *drop_hrxq;
-				drop_hrxq = mlx5_hrxq_drop_new(dev);
-				if (!drop_hrxq) {
-					rte_flow_error_set
-						(error, errno,
-						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-						 NULL,
-						 "cannot get drop hash queue");
-					goto error;
-				}
-				/*
-				 * Drop queues will be released by the specify
-				 * mlx5_hrxq_drop_release() function. Assign
-				 * the special index to hrxq to mark the queue
-				 * has been allocated. 
- */ - dh->rix_hrxq = UINT32_MAX; - dv->actions[n++] = drop_hrxq->action; - } - } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { - struct mlx5_hrxq *hrxq; - uint32_t hrxq_idx; - struct mlx5_flow_rss_desc *rss_desc = - &((struct mlx5_flow_rss_desc *)priv->rss_desc) - [!!priv->flow_nested_idx]; - - MLX5_ASSERT(rss_desc->queue_num); - hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, - rss_desc->queue_num); - if (!hrxq_idx) { - hrxq_idx = mlx5_hrxq_new - (dev, rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, - rss_desc->queue_num, - !!(dh->layers & - MLX5_FLOW_LAYER_TUNNEL)); - } - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], - hrxq_idx); - if (!hrxq) { - rte_flow_error_set - (error, rte_errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot get hash queue"); - goto error; - } - dh->rix_hrxq = hrxq_idx; - dv->actions[n++] = hrxq->action; - } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { - if (flow_dv_default_miss_resource_register - (dev, error)) { - rte_flow_error_set - (error, rte_errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot create default miss resource"); - goto error_default_miss; - } - dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS; - dv->actions[n++] = priv->sh->default_miss.action; - } - dh->drv_flow = - mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object, - (void *)&dv->value, n, - dv->actions); - if (!dh->drv_flow) { - rte_flow_error_set(error, errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "hardware refuses to create flow"); - goto error; - } - if (priv->vmwa_context && - dh->vf_vlan.tag && !dh->vf_vlan.created) { - /* - * The rule contains the VLAN pattern. - * For VF we are going to create VLAN - * interface to make hypervisor set correct - * e-Switch vport context. - */ - mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); + if (!flow) + return; + handle_idx = flow->dev_handles; + while (handle_idx) { + dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + handle_idx); + if (!dh) + return; + if (dh->drv_flow) { + claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow)); + dh->drv_flow = NULL; } + if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) + flow_dv_fate_resource_release(dev, dh); + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); + handle_idx = dh->next.next; + } +} + +/** + * Remove the flow from the NIC and the memory. + * Lock free, (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!flow) + return; + flow_dv_remove(dev, flow); + if (flow->counter) { + flow_dv_counter_free(dev, flow->counter); + flow->counter = 0; + } + if (flow->meter) { + struct mlx5_flow_meter *fm; + + fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR], + flow->meter); + if (fm) + mlx5_flow_meter_detach(fm); + flow->meter = 0; } - return 0; -error: - if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) - flow_dv_default_miss_resource_release(dev); -error_default_miss: - err = rte_errno; /* Save rte_errno before cleanup. */ - SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, - handle_idx, dh, next) { - /* hrxq is union, don't clear it if the flag is not set. 
- if (dh->rix_hrxq) {
- if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
- mlx5_hrxq_drop_release(dev);
- dh->rix_hrxq = 0;
- } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
- mlx5_hrxq_release(dev, dh->rix_hrxq);
- dh->rix_hrxq = 0;
- }
- }
- if (dh->vf_vlan.tag && dh->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ if (flow->age)
+ flow_dv_aso_age_release(dev, flow->age);
+ while (flow->dev_handles) {
+ uint32_t tmp_idx = flow->dev_handles;
+
+ dev_handle = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
+ if (!dev_handle)
+ return;
+ flow->dev_handles = dev_handle->next.next;
+ if (dev_handle->dvh.matcher)
+ flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_sample)
+ flow_dv_sample_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_dest_array)
+ flow_dv_dest_array_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_encap_decap)
+ flow_dv_encap_decap_resource_release(dev,
+ dev_handle->dvh.rix_encap_decap);
+ if (dev_handle->dvh.modify_hdr)
+ flow_dv_modify_hdr_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_push_vlan)
+ flow_dv_push_vlan_action_resource_release(dev,
+ dev_handle);
+ if (dev_handle->dvh.rix_tag)
+ flow_dv_tag_release(dev,
+ dev_handle->dvh.rix_tag);
+ flow_dv_fate_resource_release(dev, dev_handle);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
 }
- rte_errno = err; /* Restore rte_errno. */
- return -rte_errno;
 }
 
 /**
- * Release the flow matcher.
+ * Release array of hash RX queue objects.
+ * Helper function.
 *
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] hrxqs
+ * Array of hash RX queue objects.
 *
 * @return
- * 1 while a reference on it exists, 0 when freed.
+ * Total number of references to hash RX queue objects in *hrxqs* array
+ * after this operation.
 */
static int
-flow_dv_matcher_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+__flow_dv_hrxqs_release(struct rte_eth_dev *dev,
+ uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
{
- struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
+ size_t i;
+ int remaining = 0;
 
- MLX5_ASSERT(matcher->matcher_object);
- DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
- dev->data->port_id, (void *)matcher,
- rte_atomic32_read(&matcher->refcnt));
- if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (matcher->matcher_object));
- LIST_REMOVE(matcher, next);
- /* table ref-- in release interface. */
- flow_dv_tbl_resource_release(dev, matcher->tbl);
- rte_free(matcher);
- DRV_LOG(DEBUG, "port %u matcher %p: removed",
- dev->data->port_id, (void *)matcher);
- return 0;
+ for (i = 0; i < RTE_DIM(*hrxqs); i++) {
+ int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
+
+ if (!ret)
+ (*hrxqs)[i] = 0;
+ remaining += ret;
 }
- return 1;
+ return remaining;
}
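
/*
 * Editor's illustration, not part of the patch: __flow_dv_hrxqs_release()
 * above drops one reference per slot of a fixed-size table of hash RX
 * queue indexes and reports how many references remain in total, so the
 * caller can tell whether the shared RSS action is still in use. A minimal
 * standalone model of that contract (names and sizes are illustrative,
 * not the driver's):
 */
#include <stddef.h>

#define N_HASH_COMBOS 6 /* stands in for MLX5_RSS_HASH_FIELDS_LEN */

struct ref_slot {
	int refcnt; /* 0 means the slot is already free */
};

/* Drop one reference per slot; return the total references left. */
static int
release_slot_table(struct ref_slot (*slots)[N_HASH_COMBOS])
{
	int remaining = 0;
	size_t i;

	for (i = 0; i < N_HASH_COMBOS; i++) {
		if ((*slots)[i].refcnt > 0)
			(*slots)[i].refcnt--;
		remaining += (*slots)[i].refcnt;
	}
	return remaining; /* 0 only when no external references are held */
}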
 /**
- * Release an encap/decap resource.
+ * Release all hash RX queue objects representing shared RSS action.
 *
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] action
+ * Shared RSS action to remove hash RX queue objects from.
 *
 * @return
- * 1 while a reference on it exists, 0 when freed.
+ * Total number of references to hash RX queue objects stored in *action*
+ * after this operation.
+ * Expected to be 0 if no external references are held.
 */
static int
-flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *action)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.rix_encap_decap;
- struct mlx5_flow_dv_encap_decap_resource *cache_resource;
-
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- idx);
- if (!cache_resource)
- return 0;
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- &priv->sh->encaps_decaps, idx,
- cache_resource, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
- DRV_LOG(DEBUG, "encap/decap resource %p: removed",
- (void *)cache_resource);
- return 0;
- }
- return 1;
+ return __flow_dv_hrxqs_release(dev, &action->hrxq) +
+ __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
}

 /**
- * Release an jump to table action resource.
+ * Setup shared RSS action.
+ * Prepare set of hash RX queue objects sufficient to handle all valid
+ * hash_fields combinations (see enum ibv_rx_hash_fields).
 *
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action_idx
+ * Shared RSS action ipool index.
+ * @param[in, out] action
+ * Partially initialized shared RSS action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
 *
 * @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, otherwise negative errno value.
 */
static int
-flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+ uint32_t action_idx,
+ struct mlx5_shared_action_rss *action,
+ struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
- struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct mlx5_flow_rss_desc rss_desc = { 0 };
+ size_t i;
+ int err;
 
- tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
- handle->rix_jump);
- if (!tbl_data)
- return 0;
- cache_resource = &tbl_data->jump;
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- /* jump action memory free is inside the table release. */
- flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
- DRV_LOG(DEBUG, "jump table resource %p: removed",
- (void *)cache_resource);
- return 0;
+ memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
+ rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc.const_q = action->origin.queue;
+ rss_desc.queue_num = action->origin.queue_num;
+ /* Set non-zero value to indicate a shared RSS. */
+ rss_desc.shared_rss = action_idx;
+ for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
+ uint32_t hrxq_idx;
+ uint64_t hash_fields = mlx5_rss_hash_fields[i];
+ int tunnel;
+
+ for (tunnel = 0; tunnel < 2; tunnel++) {
+ rss_desc.tunnel = tunnel;
+ rss_desc.hash_fields = hash_fields;
+ hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq_idx) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error_hrxq_new;
+ }
+ err = __flow_dv_action_rss_hrxq_set
+ (action, hash_fields, tunnel, hrxq_idx);
+ MLX5_ASSERT(!err);
+ }
 }
- return 1;
+ return 0;
+error_hrxq_new:
+ err = rte_errno;
+ __flow_dv_action_rss_hrxqs_release(dev, action);
+ rte_errno = err;
+ return -rte_errno;
}

 /**
- * Release a default miss resource.
+ * Create shared RSS action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] rss
+ * RSS action specification used to create shared action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
 *
- * @param dev
- * Pointer to Ethernet device.
 * @return
- * 1 while a reference on it exists, 0 when freed.
+ * A valid shared action ID in case of success, 0 otherwise and
+ * rte_errno is set.
 */
-static int
-flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
+static uint32_t
+__flow_dv_action_rss_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow_error *error)
{
 struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_flow_default_miss_resource *cache_resource =
- &sh->default_miss;
-
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
- (void *)cache_resource->action,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- DRV_LOG(DEBUG, "default miss resource %p: removed",
- (void *)cache_resource->action);
- return 0;
+ struct mlx5_shared_action_rss *shared_action = NULL;
+ void *queue = NULL;
+ struct rte_flow_action_rss *origin;
+ const uint8_t *rss_key;
+ uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
+ uint32_t idx;
+
+ RTE_SET_USED(conf);
+ queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
+ 0, SOCKET_ID_ANY);
+ shared_action = mlx5_ipool_zmalloc
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
+ if (!shared_action || !queue) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ goto error_rss_init;
 }
- return 1;
+ if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
+ rte_flow_error_set(error, E2BIG,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "rss action number out of range");
+ goto error_rss_init;
+ }
+ shared_action->queue = queue;
+ origin = &shared_action->origin;
+ origin->func = rss->func;
+ origin->level = rss->level;
+ /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
+ origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ origin->key = &shared_action->key[0];
+ origin->key_len = MLX5_RSS_HASH_KEY_LEN;
+ memcpy(shared_action->queue, rss->queue, queue_size);
+ origin->queue = shared_action->queue;
+ origin->queue_num = rss->queue_num;
+ if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
+ goto error_rss_init;
+ __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
+ rte_spinlock_lock(&priv->shared_act_sl);
+ ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ &priv->rss_shared_actions, idx, shared_action, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
+ return idx;
+error_rss_init:
+ if (shared_action)
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ idx);
+ if (queue)
+ mlx5_free(queue);
+ return 0;
}

 /**
- * Release a modify-header resource.
+ * Destroy the shared RSS action.
+ * Release related hash RX queue objects.
 *
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * The shared RSS action object ID to be removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
 *
 * @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, otherwise negative errno value.
 */
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
+__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- handle->dvh.modify_hdr;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+ uint32_t old_refcnt = 1;
+ int remaining;
 
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- LIST_REMOVE(cache_resource, next);
- rte_free(cache_resource);
- DRV_LOG(DEBUG, "modify-header resource %p: removed",
- (void *)cache_resource);
- return 0;
- }
- return 1;
+ if (!shared_rss)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid shared action");
+ remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ if (remaining)
+ return rte_flow_error_set(error, ETOOMANYREFS,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss hrxq has references");
+ if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
+ 0, 0, __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED))
+ return rte_flow_error_set(error, ETOOMANYREFS,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss has references");
+ mlx5_free(shared_rss->queue);
+ rte_spinlock_lock(&priv->shared_act_sl);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ &priv->rss_shared_actions, idx, shared_rss, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ idx);
+ return 0;
}
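
/*
 * Editor's illustration, not part of the patch: __flow_dv_action_rss_release()
 * above frees the action only when it can atomically move the reference
 * count from exactly 1 to 0; any concurrent holder makes the compare-and-swap
 * fail and the call is refused with ETOOMANYREFS. The same idiom in plain
 * C11 atomics (illustrative only):
 */
#include <stdatomic.h>
#include <stdbool.h>

struct shared_obj {
	atomic_uint refcnt;
};

/* Return true when the caller held the last reference and may free. */
static bool
try_release_last_ref(struct shared_obj *obj)
{
	unsigned int expected = 1;

	/* Succeeds only for the 1 -> 0 transition. */
	return atomic_compare_exchange_strong(&obj->refcnt, &expected, 0);
}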
 /**
- * Release port ID action resource.
+ * Create shared action, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
 *
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * Action specification used to create shared action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
 *
 * @return
- * 1 while a reference on it exists, 0 when freed.
+ * A valid shared action handle in case of success, NULL otherwise and
+ * rte_errno is set.
 */
-static int
-flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+static struct rte_flow_shared_action *
+flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->rix_port_id_action;
+ uint32_t idx = 0;
+ uint32_t ret = 0;
 
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
- idx);
- if (!cache_resource)
- return 0;
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
- &priv->sh->port_id_action_list, idx,
- cache_resource, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
- DRV_LOG(DEBUG, "port id action resource %p: removed",
- (void *)cache_resource);
- return 0;
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
+ idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
+ MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_translate_create_aso_age(dev, action->conf);
+ idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
+ MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ if (ret) {
+ struct mlx5_aso_age_action *aso_age =
+ flow_aso_age_get_by_idx(dev, ret);
+
+ if (!aso_age->age_params.context)
+ aso_age->age_params.context =
+ (void *)(uintptr_t)idx;
+ }
+ break;
+ default:
+ rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "action type not supported");
+ break;
 }
- return 1;
+ return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
}
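
/*
 * Editor's illustration, not part of the patch: the opaque handle returned by
 * flow_dv_action_create() above packs the action type into the bits at and
 * above MLX5_SHARED_ACTION_TYPE_OFFSET and the ipool index into the bits
 * below it; destroy/update/query undo the packing. The scheme in isolation
 * (the offset value here is illustrative):
 */
#include <stdint.h>

#define TYPE_OFFSET 29 /* stands in for MLX5_SHARED_ACTION_TYPE_OFFSET */

static inline uintptr_t
shared_action_encode(uint32_t type, uint32_t idx)
{
	return ((uintptr_t)type << TYPE_OFFSET) | idx;
}

static inline void
shared_action_decode(uintptr_t handle, uint32_t *type, uint32_t *idx)
{
	*type = (uint32_t)(handle >> TYPE_OFFSET);
	*idx = (uint32_t)(handle & ((1u << TYPE_OFFSET) - 1));
}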
 /**
- * Release push vlan action resource.
+ * Destroy the shared action.
+ * Release action related resources on the NIC and the memory.
+ * Lock free, (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
 *
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The shared action object to be removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
 *
 * @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, otherwise negative errno value.
 */
static int
-flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.rix_push_vlan;
- struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ int ret;
 
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- idx);
- if (!cache_resource)
- return 0;
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- &priv->sh->push_vlan_action_list, idx,
- cache_resource, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
- DRV_LOG(DEBUG, "push vlan action resource %p: removed",
- (void *)cache_resource);
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_RSS:
+ return __flow_dv_action_rss_release(dev, idx, error);
+ case MLX5_SHARED_ACTION_TYPE_AGE:
+ ret = flow_dv_aso_age_release(dev, idx);
+ if (ret)
+ /*
+ * In this case, the last flow holding a reference
+ * will actually release the age action.
+ */
+ DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
+ " released with references %d.", idx, ret);
 return 0;
- }
- return 1;
-}
-
-/**
- * Release the fate resource.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
- */
-static void
-flow_dv_fate_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
-{
- if (!handle->rix_fate)
- return;
- switch (handle->fate_action) {
- case MLX5_FLOW_FATE_DROP:
- mlx5_hrxq_drop_release(dev);
- break;
- case MLX5_FLOW_FATE_QUEUE:
- mlx5_hrxq_release(dev, handle->rix_hrxq);
- break;
- case MLX5_FLOW_FATE_JUMP:
- flow_dv_jump_tbl_resource_release(dev, handle);
- break;
- case MLX5_FLOW_FATE_PORT_ID:
- flow_dv_port_id_action_resource_release(dev, handle);
- break;
- case MLX5_FLOW_FATE_DEFAULT_MISS:
- flow_dv_default_miss_resource_release(dev);
- break;
 default:
- DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
- break;
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
 }
- handle->rix_fate = 0;
}
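
/*
 * Editor's illustration, not part of the patch: these callbacks back the
 * experimental rte_flow shared-action API, so an application exercises them
 * roughly as below (port and queue numbers are made up for the sketch):
 */
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int
make_and_drop_shared_rss(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	const struct rte_flow_shared_action_conf conf = {
		.ingress = 1,
	};
	struct rte_flow_error error;
	struct rte_flow_shared_action *handle;

	handle = rte_flow_shared_action_create(port_id, &conf, &action,
					       &error);
	if (handle == NULL)
		return -rte_errno;
	/* Flows would reference it via RTE_FLOW_ACTION_TYPE_SHARED here. */
	return rte_flow_shared_action_destroy(port_id, handle, &error);
}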
 /**
- * Remove the flow from the NIC but keeps it in memory.
- * Lock free, (mutex should be acquired by caller).
+ * Updates in place shared RSS action configuration.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * The shared RSS action object ID to be updated.
+ * @param[in] action_conf
+ * RSS action specification used to modify *shared_rss*.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
 *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @return
+ * 0 on success, otherwise negative errno value.
+ * @note: currently only update of the RSS queue set is supported.
 */
-static void
-__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static int
+__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
+ const struct rte_flow_action_rss *action_conf,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_handle *dh;
- uint32_t handle_idx;
 struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+ size_t i;
+ int ret;
+ void *queue = NULL;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
+ uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
 
- if (!flow)
- return;
- handle_idx = flow->dev_handles;
- while (handle_idx) {
- dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
- handle_idx);
- if (!dh)
- return;
- if (dh->drv_flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dh->drv_flow));
- dh->drv_flow = NULL;
+ if (!shared_rss)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid shared action to update");
+ queue = mlx5_malloc(MLX5_MEM_ZERO,
+ RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
+ 0, SOCKET_ID_ANY);
+ if (!queue)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ if (action_conf->key) {
+ rss_key = action_conf->key;
+ rss_key_len = action_conf->key_len;
+ } else {
+ rss_key = rss_hash_default_key;
+ rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ }
+ for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
+ uint32_t hrxq_idx;
+ uint64_t hash_fields = mlx5_rss_hash_fields[i];
+ int tunnel;
+
+ for (tunnel = 0; tunnel < 2; tunnel++) {
+ hrxq_idx = __flow_dv_action_rss_hrxq_lookup
+ (dev, idx, hash_fields, tunnel);
+ MLX5_ASSERT(hrxq_idx);
+ ret = mlx5_hrxq_modify
+ (dev, hrxq_idx,
+ rss_key, rss_key_len,
+ hash_fields,
+ action_conf->queue, action_conf->queue_num);
+ if (ret) {
+ mlx5_free(queue);
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot update hash queue");
+ }
 }
- if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
- dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
- dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
- flow_dv_fate_resource_release(dev, dh);
- if (dh->vf_vlan.tag && dh->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
- handle_idx = dh->next.next;
 }
+ mlx5_free(shared_rss->queue);
+ shared_rss->queue = queue;
+ memcpy(shared_rss->queue, action_conf->queue, queue_size);
+ shared_rss->origin.queue = shared_rss->queue;
+ shared_rss->origin.queue_num = action_conf->queue_num;
+ return 0;
}
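
/*
 * Editor's illustration, not part of the patch: the in-place queue update
 * above is what makes rte_flow_shared_action_update() useful, since every
 * flow referencing the shared handle switches to the new queue set without
 * being destroyed and re-created. A hedged usage sketch (handle and queues
 * are made up):
 */
#include <rte_flow.h>

static int
retarget_shared_rss(uint16_t port_id, struct rte_flow_shared_action *handle)
{
	static const uint16_t new_queues[] = { 4, 5, 6, 7 };
	const struct rte_flow_action_rss rss = {
		.queue = new_queues,
		.queue_num = 4,
	};
	const struct rte_flow_action update = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	struct rte_flow_error error;

	/* Per the note above, only the queue set is allowed to change. */
	return rte_flow_shared_action_update(port_id, handle, &update, &error);
}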
 /**
- * Remove the flow from the NIC and the memory.
- * Lock free, (mutex should be acquired by caller).
+ * Updates in place shared action configuration, lock free,
+ * (mutex should be acquired by caller).
 *
 * @param[in] dev
 * Pointer to the Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @param[in] action
+ * The shared action object to be updated.
+ * @param[in] action_conf
+ * Action specification used to modify *action*.
+ * *action_conf* should be of type correlating with type of the *action*,
+ * otherwise considered as invalid.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
 */
-static void
-__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- struct mlx5_flow_handle *dev_handle;
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (!flow)
- return;
- __flow_dv_remove(dev, flow);
- if (flow->counter) {
- flow_dv_counter_release(dev, flow->counter);
- flow->counter = 0;
- }
- if (flow->meter) {
- struct mlx5_flow_meter *fm;
-
- fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
- flow->meter);
- if (fm)
- mlx5_flow_meter_detach(fm);
- flow->meter = 0;
+static int
+flow_dv_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ const void *action_conf,
+ struct rte_flow_error *err)
+{
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_RSS:
+ return __flow_dv_action_rss_update(dev, idx, action_conf, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type update not supported");
 }
- while (flow->dev_handles) {
- uint32_t tmp_idx = flow->dev_handles;
+}
 
- dev_handle = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
- if (!dev_handle)
- return;
- flow->dev_handles = dev_handle->next.next;
- if (dev_handle->dvh.matcher)
- flow_dv_matcher_release(dev, dev_handle);
- if (dev_handle->dvh.rix_encap_decap)
- flow_dv_encap_decap_resource_release(dev, dev_handle);
- if (dev_handle->dvh.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_handle);
- if (dev_handle->dvh.rix_push_vlan)
- flow_dv_push_vlan_action_resource_release(dev,
- dev_handle);
- if (dev_handle->dvh.rix_tag)
- flow_dv_tag_release(dev,
- dev_handle->dvh.rix_tag);
- flow_dv_fate_resource_release(dev, dev_handle);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
- tmp_idx);
+static int
+flow_dv_action_query(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action *action, void *data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_age_param *age_param;
+ struct rte_flow_query_age *resp;
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_AGE:
+ age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
+ resp = data;
+ resp->aged = __atomic_load_n(&age_param->state,
+ __ATOMIC_RELAXED) == AGE_TMOUT ?
+ 1 : 0;
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit = __atomic_load_n
+ (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ return 0;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type query not supported");
 }
}
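
/*
 * Editor's illustration, not part of the patch: querying a shared AGE action
 * fills the standard struct rte_flow_query_age, as flow_dv_action_query()
 * above shows. A hedged application-side sketch:
 */
#include <stdio.h>
#include <rte_flow.h>

static int
poll_shared_age(uint16_t port_id, struct rte_flow_shared_action *handle)
{
	struct rte_flow_query_age age = { 0 };
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_shared_action_query(port_id, handle, &age, &error);
	if (ret)
		return ret;
	if (age.aged)
		return 1; /* the timeout elapsed with no traffic */
	if (age.sec_since_last_hit_valid)
		printf("idle for %u seconds\n",
		       (unsigned int)age.sec_since_last_hit);
	return 0;
}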
@@ -9167,6 +11770,55 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
 "counters are not available");
}

+/**
+ * Query a flow rule AGE action for aging information.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the sub flow.
+ * @param[out] data
+ * Data retrieved by the query.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
+ void *data, struct rte_flow_error *error)
+{
+ struct rte_flow_query_age *resp = data;
+ struct mlx5_age_param *age_param;
+
+ if (flow->age) {
+ struct mlx5_aso_age_action *act =
+ flow_aso_age_get_by_idx(dev, flow->age);
+
+ age_param = &act->age_params;
+ } else if (flow->counter) {
+ age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
+
+ if (!age_param || !age_param->timeout)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot read age data");
+ } else {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "age data not available");
+ }
+ resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ AGE_TMOUT ? 1 : 0;
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit = __atomic_load_n
+ (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ return 0;
+}
+
 /**
 * Query a flow.
 *
@@ -9189,6 +11841,9 @@ flow_dv_query(struct rte_eth_dev *dev,
 case RTE_FLOW_ACTION_TYPE_COUNT:
 ret = flow_dv_query_count(dev, flow, data, error);
 break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_query_age(dev, flow, data, error);
+ break;
 default:
 return rte_flow_error_set(error, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ACTION,
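
/*
 * Editor's illustration, not part of the patch: for ordinary flows that carry
 * an AGE action, the same aging data is reachable through rte_flow_query()
 * with an RTE_FLOW_ACTION_TYPE_AGE descriptor, which lands in
 * flow_dv_query_age() above. A hedged usage sketch:
 */
#include <rte_flow.h>

static int
query_flow_age(uint16_t port_id, struct rte_flow *flow,
	       struct rte_flow_query_age *age)
{
	const struct rte_flow_action age_desc = {
		.type = RTE_FLOW_ACTION_TYPE_AGE,
	};
	struct rte_flow_error error;

	return rte_flow_query(port_id, flow, &age_desc, age, &error);
}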
@@ -9222,47 +11877,49 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
 if (!mtd || !priv->config.dv_flow_en)
 return 0;
 if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
 if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
 if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
 if (mtd->egress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.color_matcher));
 if (mtd->egress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.any_matcher));
 if (mtd->egress.tbl)
- flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
 if (mtd->egress.sfx_tbl)
- flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
 if (mtd->ingress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.color_matcher));
 if (mtd->ingress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.any_matcher));
 if (mtd->ingress.tbl)
- flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
 if (mtd->ingress.sfx_tbl)
- flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ mtd->ingress.sfx_tbl);
 if (mtd->transfer.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.color_matcher));
 if (mtd->transfer.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.any_matcher));
 if (mtd->transfer.tbl)
- flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
 if (mtd->transfer.sfx_tbl)
- flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ mtd->transfer.sfx_tbl);
 if (mtd->drop_actn)
- claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
- rte_free(mtd);
+ claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
+ mlx5_free(mtd);
 return 0;
}
@@ -9310,6 +11967,7 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 struct mlx5_meter_domain_info *dtb;
 struct rte_flow_error error;
 int i = 0;
+ int ret;
 
 if (transfer)
 dtb = &mtb->transfer;
@@ -9319,7 +11977,8 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 dtb = &mtb->ingress;
 /* Create the meter table with METER level. */
 dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ 0, &error);
 if (!dtb->tbl) {
 DRV_LOG(ERR, "Failed to create meter policer table.");
 return -1;
@@ -9327,7 +11986,8 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 /* Create the meter suffix table with SUFFIX level. */
 dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
 MLX5_FLOW_TABLE_LEVEL_SUFFIX,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ 0, &error);
 if (!dtb->sfx_tbl) {
 DRV_LOG(ERR, "Failed to create meter suffix table.");
 return -1;
@@ -9335,10 +11995,9 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 /* Create matchers, Any and Color. */
 dv_attr.priority = 3;
 dv_attr.match_criteria_enable = 0;
- dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->any_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->any_matcher);
+ if (ret) {
 DRV_LOG(ERR, "Failed to create meter"
 " policer default matcher.");
 goto error_exit;
@@ -9348,10 +12007,9 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
 flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
 rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
- dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->color_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->color_matcher);
+ if (ret) {
 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
 goto error_exit;
 }
@@ -9359,10 +12017,10 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
 actions[i++] = mtb->drop_actn;
 /* Default rule: lowest priority, match any, actions: drop. */
- dtb->policer_rules[RTE_MTR_DROPPED] =
- mlx5_glue->dv_create_flow(dtb->any_matcher,
- (void *)&value, i, actions);
- if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
+ ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
+ actions,
+ &dtb->policer_rules[RTE_MTR_DROPPED]);
+ if (ret) {
 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
 goto error_exit;
 }
@@ -9396,7 +12054,7 @@ flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
 rte_errno = ENOTSUP;
 return NULL;
 }
- mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
+ mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
 if (!mtb) {
 DRV_LOG(ERR, "Failed to allocate memory for meter.");
 return NULL;
@@ -9411,8 +12069,8 @@ flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
 mtb->count_actns[i] = cnt->action;
 }
 /* Create drop action. */
- mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
- if (!mtb->drop_actn) {
+ ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
+ if (ret) {
 DRV_LOG(ERR, "Failed to create drop action.");
 goto error_exit;
 }
@@ -9456,13 +12114,13 @@ flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
 
 for (i = 0; i < RTE_MTR_DROPPED; i++) {
 if (dt->policer_rules[i]) {
- claim_zero(mlx5_glue->dv_destroy_flow
- (dt->policer_rules[i]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (dt->policer_rules[i]));
 dt->policer_rules[i] = NULL;
 }
 }
 if (dt->jump_actn) {
- claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
+ claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
 dt->jump_actn = NULL;
 }
}
@@ -9525,13 +12183,13 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
 struct mlx5_meter_domains_infos *mtb = fm->mfts;
 void *actions[METER_ACTIONS];
 int i;
+ int ret = 0;
 
 /* Create jump action. */
 if (!dtb->jump_actn)
- dtb->jump_actn =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (dtb->sfx_tbl->obj);
- if (!dtb->jump_actn) {
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (dtb->sfx_tbl->obj, &dtb->jump_actn);
+ if (ret) {
 DRV_LOG(ERR, "Failed to create policer jump action.");
 goto error;
 }
@@ -9546,11 +12204,10 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
 actions[j++] = mtb->drop_actn;
 else
 actions[j++] = dtb->jump_actn;
- dtb->policer_rules[i] =
- mlx5_glue->dv_create_flow(dtb->color_matcher,
- (void *)&value,
- j, actions);
- if (!dtb->policer_rules[i]) {
+ ret = mlx5_flow_os_create_flow(dtb->color_matcher,
+ (void *)&value, j, actions,
+ &dtb->policer_rules[i]);
+ if (ret) {
 DRV_LOG(ERR, "Failed to create policer rule.");
 goto error;
 }
@@ -9613,6 +12270,92 @@ error:
 return -1;
}

+/**
+ * Validate the batch counter support in root table.
+ *
+ * Create a simple flow with invalid counter and drop action on root table to
+ * validate if batch counter with offset on root table is supported or not.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_match_params mask = {
+ .size = sizeof(mask.buf),
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf),
+ };
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 0,
+ .match_criteria_enable = 0,
+ .match_mask = (void *)&mask,
+ };
+ void *actions[2] = { 0 };
+ struct mlx5_flow_tbl_resource *tbl = NULL;
+ struct mlx5_devx_obj *dcs = NULL;
+ void *matcher = NULL;
+ void *flow = NULL;
+ int ret = -1;
+
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
+ if (!tbl)
+ goto err;
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ if (!dcs)
+ goto err;
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
+ &actions[0]);
+ if (ret)
+ goto err;
+ actions[1] = priv->drop_queue.hrxq->action;
+ dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &matcher);
+ if (ret)
+ goto err;
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+ actions, &flow);
+err:
+ /*
+ * If batch counters with offset are not supported, the driver does not
+ * validate the invalid offset value and flow creation should succeed.
+ * In this case, it means batch counters are not supported in the root
+ * table.
+ *
+ * Otherwise, if flow creation failed, counter offset is supported.
+ */
+ if (flow) {
+ DRV_LOG(INFO, "Batch counter is not supported in root "
+ "table. Switch to fallback mode.");
+ rte_errno = ENOTSUP;
+ ret = -rte_errno;
+ claim_zero(mlx5_flow_os_destroy_flow(flow));
+ } else {
+ /* Check matcher to make sure validation failed at flow create. */
+ if (!matcher || (matcher && errno != EINVAL))
+ DRV_LOG(ERR, "Unexpected error in counter offset "
+ "support detection");
+ ret = 0;
+ }
+ if (actions[0])
+ claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
+ if (matcher)
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
+ if (tbl)
+ flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
+ if (dcs)
+ claim_zero(mlx5_devx_cmd_destroy(dcs));
+ return ret;
+}
+
 /**
 * Query a devx counter.
 *
@@ -9685,16 +12428,24 @@ flow_get_aged_flows(struct rte_eth_dev *dev,
 struct mlx5_age_info *age_info;
 struct mlx5_age_param *age_param;
 struct mlx5_flow_counter *counter;
+ struct mlx5_aso_age_action *act;
 int nb_flows = 0;
 
 if (nb_contexts && !context)
 return rte_flow_error_set(error, EINVAL,
 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "Should assign at least one flow or"
- " context to get if nb_contexts != 0");
+ NULL, "empty context");
 age_info = GET_PORT_AGE_INFO(priv);
 rte_spinlock_lock(&age_info->aged_sl);
+ LIST_FOREACH(act, &age_info->aged_aso, next) {
+ nb_flows++;
+ if (nb_contexts) {
+ context[nb_flows - 1] =
+ act->age_params.context;
+ if (!(--nb_contexts))
+ break;
+ }
+ }
 TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
 nb_flows++;
 if (nb_contexts) {
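
/*
 * Editor's illustration, not part of the patch: flow_get_aged_flows() above
 * now reports both counter-based and ASO-based aged entries; applications
 * reach it through rte_flow_get_aged_flows(), typically after an
 * RTE_ETH_EVENT_FLOW_AGED event. A hedged sketch (the callback is a
 * hypothetical application function):
 */
#include <rte_flow.h>

#define MAX_AGED 64 /* illustrative batch size */

void handle_aged_context(void *ctx); /* hypothetical application callback */

static void
drain_aged_flows(uint16_t port_id)
{
	void *contexts[MAX_AGED];
	struct rte_flow_error error;
	int n;

	n = rte_flow_get_aged_flows(port_id, contexts, MAX_AGED, &error);
	while (n-- > 0) {
		/*
		 * Each context is whatever the application stored in
		 * struct rte_flow_action_age.context when the flow or the
		 * shared AGE action was created.
		 */
		handle_aged_context(contexts[n]);
	}
}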
@@ -9710,85 +12461,81 @@
 }
 
 /*
- * Mutex-protected thunk to lock-free __flow_dv_translate().
+ * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
-static int
-flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static uint32_t
+flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
- flow_dv_shared_unlock(dev);
- return ret;
+ return flow_dv_counter_alloc(dev, 0);
}

-/*
- * Mutex-protected thunk to lock-free __flow_dv_apply().
+/**
+ * Validate shared action.
+ * Dispatcher for action type specific validation.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * The shared action object to validate.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
 */
static int
-flow_dv_apply(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_apply(dev, flow, error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_remove().
- */
-static void
-flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_dv_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err)
{
- flow_dv_shared_lock(dev);
- __flow_dv_remove(dev, flow);
- flow_dv_shared_unlock(dev);
-}
+ struct mlx5_priv *priv = dev->data->dev_private;
 
-/*
- * Mutex-protected thunk to lock-free __flow_dv_destroy().
- */
-static void
-flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- flow_dv_shared_lock(dev);
- __flow_dv_destroy(dev, flow);
- flow_dv_shared_unlock(dev);
+ RTE_SET_USED(conf);
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ return mlx5_validate_action_rss(dev, action, err);
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ if (!priv->sh->aso_age_mng)
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "shared age action not supported");
+ return flow_dv_validate_action_age(0, action, dev, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
}

-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
- */
-static uint32_t
-flow_dv_counter_allocate(struct rte_eth_dev *dev)
+static int
+flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
- uint32_t cnt;
-
- flow_dv_shared_lock(dev);
- cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
- flow_dv_shared_unlock(dev);
- return cnt;
-}
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
 
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_release().
- */ -static void -flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt) -{ - flow_dv_shared_lock(dev); - flow_dv_counter_release(dev, cnt); - flow_dv_shared_unlock(dev); + if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) { + ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, + flags); + if (ret != 0) + return ret; + } + if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) { + ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags); + if (ret != 0) + return ret; + } + if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) { + ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags); + if (ret != 0) + return ret; + } + return 0; } const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { @@ -9807,6 +12554,13 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { .counter_free = flow_dv_counter_free, .counter_query = flow_dv_counter_query, .get_aged_flows = flow_get_aged_flows, + .action_validate = flow_dv_action_validate, + .action_create = flow_dv_action_create, + .action_destroy = flow_dv_action_destroy, + .action_update = flow_dv_action_update, + .action_query = flow_dv_action_query, + .sync_domain = flow_dv_sync_domain, }; #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ +
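
/*
 * Editor's illustration, not part of the patch: the new .sync_domain callback
 * backs the mlx5-specific rte_pmd_mlx5_sync_flow() call declared in
 * rte_pmd_mlx5.h (included at the top of this patch), which flushes pending
 * steering-rule updates to the selected domains. A hedged usage sketch:
 */
#include <rte_pmd_mlx5.h>

static int
flush_rx_and_fdb_rules(uint16_t port_id)
{
	const uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_FDB;

	/* Returns 0 once all requested domains are synchronized. */
	return rte_pmd_mlx5_sync_flow(port_id, domains);
}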