diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 5d9fbffd0e..9a0102abb5 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -93,24 +93,14 @@ static int flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, uint32_t rix_jump); -static inline uint16_t -mlx5_translate_tunnel_etypes(uint64_t pattern_flags) -{ - if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2) - return RTE_ETHER_TYPE_TEB; - else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) - return RTE_ETHER_TYPE_IPV4; - else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6) - return RTE_ETHER_TYPE_IPV6; - else if (pattern_flags & MLX5_FLOW_LAYER_MPLS) - return RTE_ETHER_TYPE_MPLS; - return 0; -} - static int16_t flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_common_device *cdev = priv->sh->cdev; + + if (cdev->config.hca_attr.esw_mgr_vport_id_valid) + return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id; if (priv->pci_dev == NULL) return 0; @@ -172,6 +162,7 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: case RTE_FLOW_ITEM_TYPE_GENEVE: case RTE_FLOW_ITEM_TYPE_MPLS: + case RTE_FLOW_ITEM_TYPE_GTP: if (tunnel_decap) attr->attr = 0; break; @@ -326,7 +317,8 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl, mlx5_list_match_cb cb_match, mlx5_list_remove_cb cb_remove, mlx5_list_clone_cb cb_clone, - mlx5_list_clone_free_cb cb_clone_free) + mlx5_list_clone_free_cb cb_clone_free, + struct rte_flow_error *error) { struct mlx5_hlist *hl; struct mlx5_hlist *expected = NULL; @@ -341,7 +333,9 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl, cb_clone_free); if (!hl) { DRV_LOG(ERR, "%s hash creation failed", name); - rte_errno = ENOMEM; + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); return NULL; } if (!__atomic_compare_exchange_n(phl, &expected, hl, false, @@ -1160,7 +1154,8 @@ flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev, uint32_t reg_c0 = priv->sh->dv_regc0_mask; MLX5_ASSERT(reg_c0); - MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY); + MLX5_ASSERT(priv->sh->config.dv_xmeta_en != + MLX5_XMETA_MODE_LEGACY); if (conf->dst == REG_C_0) { /* Copy to reg_c[0], within mask only.
*/ reg_dst.offset = rte_bsf32(reg_c0); @@ -1465,7 +1460,7 @@ static void mlx5_flow_field_id_to_modify_info (const struct rte_flow_action_modify_data *data, struct field_modify_info *info, uint32_t *mask, - uint32_t width, uint32_t *shift, struct rte_eth_dev *dev, + uint32_t width, struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -1820,16 +1815,11 @@ mlx5_flow_field_id_to_modify_info { uint32_t meta_mask = priv->sh->dv_meta_mask; uint32_t meta_count = __builtin_popcount(meta_mask); - uint32_t msk_c0 = - rte_cpu_to_be_32(priv->sh->dv_regc0_mask); - uint32_t shl_c0 = rte_bsf32(msk_c0); int reg = flow_dv_get_metadata_reg(dev, attr, error); if (reg < 0) return; MLX5_ASSERT(reg != REG_NON); MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field)); - if (reg == REG_C_0) - *shift = shl_c0; info[idx] = (struct field_modify_info){4, 0, reg_to_field[reg]}; if (mask) @@ -1881,29 +1871,33 @@ flow_dv_convert_action_modify_field struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = { {0, 0, 0} }; uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0}; - uint32_t type; - uint32_t shift = 0; + uint32_t type, meta = 0; if (conf->src.field == RTE_FLOW_FIELD_POINTER || conf->src.field == RTE_FLOW_FIELD_VALUE) { type = MLX5_MODIFICATION_TYPE_SET; /** For SET fill the destination field (field) first. */ mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask, - conf->width, &shift, dev, + conf->width, dev, attr, error); item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ? (void *)(uintptr_t)conf->src.pvalue : (void *)(uintptr_t)&conf->src.value; + if (conf->dst.field == RTE_FLOW_FIELD_META) { + meta = *(const unaligned_uint32_t *)item.spec; + meta = rte_cpu_to_be_32(meta); + item.spec = &meta; + } } else { type = MLX5_MODIFICATION_TYPE_COPY; /** For COPY fill the destination field (dcopy) without mask. */ mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL, - conf->width, &shift, dev, + conf->width, dev, attr, error); /** Then construct the source field (field) with mask. 
*/ mlx5_flow_field_id_to_modify_info(&conf->src, field, mask, - conf->width, &shift, - dev, attr, error); + conf->width, dev, + attr, error); } item.mask = &mask; return flow_dv_convert_modify_action(&item, @@ -1932,7 +1926,7 @@ flow_dv_validate_item_mark(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_item_mark *spec = item->spec; const struct rte_flow_item_mark *mask = item->mask; const struct rte_flow_item_mark nic_mask = { @@ -2006,7 +2000,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_item_meta *spec = item->spec; const struct rte_flow_item_meta *mask = item->mask; struct rte_flow_item_meta nic_mask = { @@ -2332,7 +2326,7 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev, .teid = RTE_BE32(0xffffffff), }; - if (!priv->config.hca_attr.tunnel_stateless_gtp) + if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "GTP support is not enabled"); @@ -2441,6 +2435,7 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev, { int ret; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr; const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *last = item->last; const struct rte_flow_item_ipv4 *mask = item->mask; @@ -2459,8 +2454,8 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev, if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); - bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl : - priv->config.hca_attr.inner_ipv4_ihl; + bool ihl_cap = !tunnel ? + attr->outer_ipv4_ihl : attr->inner_ipv4_ihl; if (!ihl_cap) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2879,8 +2874,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, { const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; const struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - bool direction_error = false; if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) @@ -2892,22 +2885,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, port_id should " "be after push VLAN"); - /* Push VLAN is not supported in ingress except for CX6 FDB mode. 
*/ - if (attr->transfer) { - bool fdb_tx = priv->representor_id != UINT16_MAX; - bool is_cx5 = sh->steering_format_version == - MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5; - - if (!fdb_tx && is_cx5) - direction_error = true; - } else if (attr->ingress) { - direction_error = true; - } - if (direction_error) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "push vlan action not supported for ingress"); if (!attr->transfer && priv->representor) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -3055,7 +3032,7 @@ flow_dv_validate_action_flag(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; int ret; /* Fall back if no extended metadata register support. */ @@ -3114,7 +3091,7 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_action_mark *mark = action->conf; int ret; @@ -3188,7 +3165,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_action_set_meta *conf; uint32_t nic_mask = UINT32_MAX; int reg; @@ -3283,6 +3260,25 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, return 0; } +/** + * Indicates whether ASO aging is supported. + * + * @param[in] sh + * Pointer to shared device context structure. + * @param[in] attr + * Attributes of flow that includes AGE action. + * + * @return + * True when ASO aging is supported, false otherwise. + */ +static inline bool +flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh, + const struct rte_flow_attr *attr) +{ + MLX5_ASSERT(sh && attr); + return (sh->flow_hit_aso_en && (attr->transfer || attr->group)); +} + /** * Validate count action. * @@ -3292,6 +3288,8 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, * Indicator if action is shared. * @param[in] action_flags * Holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. * @param[out] error * Pointer to error structure. 
* @@ -3301,21 +3299,22 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, static int flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared, uint64_t action_flags, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - if (!priv->sh->devx) + if (!priv->sh->cdev->config.devx) goto notsup_err; if (action_flags & MLX5_FLOW_ACTION_COUNT) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "duplicate count actions set"); if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) && - !priv->sh->flow_hit_aso_en) + !flow_hit_aso_supported(priv->sh, attr)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "old age and shared count combination is not supported"); + "old age and indirect count combination is not supported"); #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS return 0; #endif @@ -3399,8 +3398,8 @@ flow_dv_validate_action_decap(struct rte_eth_dev *dev, { const struct mlx5_priv *priv = dev->data->dev_private; - if (priv->config.hca_attr.scatter_fcs_w_decap_disable && - !priv->config.decap_en) + if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable && + !priv->sh->config.decap_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "decap is not enabled"); @@ -3740,7 +3739,8 @@ flow_dv_encap_decap_resource_register flow_dv_encap_decap_match_cb, flow_dv_encap_decap_remove_cb, flow_dv_encap_decap_clone_cb, - flow_dv_encap_decap_clone_free_cb); + flow_dv_encap_decap_clone_free_cb, + error); if (unlikely(!encaps_decaps)) return -rte_errno; resource->flags = dev_flow->dv.group ? 0 : 1; @@ -4039,7 +4039,7 @@ flow_dv_push_vlan_action_resource_register * @return * sizeof struct item_type, 0 if void or irrelevant. */ -static size_t +size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type) { size_t retval; @@ -4105,7 +4105,7 @@ flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type) * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ -static int +int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, size_t *size, struct rte_flow_error *error) { @@ -4825,7 +4825,7 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, { int ret = 0; struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_action_modify_field *action_modify_field = action->conf; uint32_t dst_width = mlx5_flow_item_field_width(dev, @@ -4982,7 +4982,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, const struct rte_flow_attr *attributes, bool external, struct rte_flow_error *error) { - uint32_t target_group, table; + uint32_t target_group, table = 0; int ret = 0; struct flow_grp_info grp_info = { .external = !!external, @@ -5013,6 +5013,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "target group must be other than" " the current flow group"); + if (table == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "root table shouldn't be destination"); return 0; } @@ -5312,8 +5316,8 @@ flow_dv_validate_action_age(uint64_t action_flags, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_age *age = action->conf; - if (!priv->sh->devx || (priv->sh->cmng.counter_fallback && - !priv->sh->aso_age_mng)) + if (!priv->sh->cdev->config.devx || + (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -5433,8 +5437,9 @@ flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index) .grow_trunk = 3, .grow_shift = 2, .need_lock = 1, - .release_mem_en = !!sh->reclaim_mode, - .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16), + .release_mem_en = !!sh->config.reclaim_mode, + .per_core_cache = + sh->config.reclaim_mode ? 
0 : (1 << 16), .malloc = mlx5_malloc, .free = mlx5_free, .type = "mlx5_modify_action_resource", @@ -5581,7 +5586,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *dev_conf = &priv->config; + struct mlx5_sh_config *dev_conf = &priv->sh->config; const struct rte_flow_action_sample *sample = action->conf; const struct rte_flow_action *act; uint64_t sub_action_flags = 0; @@ -5597,7 +5602,8 @@ flow_dv_validate_action_sample(uint64_t *action_flags, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "ratio value starts from 1"); - if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en)) + if (!priv->sh->cdev->config.devx || + (sample->ratio > 0 && !priv->sampler_en)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -5678,7 +5684,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_dv_validate_action_count (dev, false, *action_flags | sub_action_flags, - error); + attr, error); if (ret < 0) return ret; *count = act->conf; @@ -5764,14 +5770,13 @@ flow_dv_validate_action_sample(uint64_t *action_flags, NULL, "E-Switch must has a dest " "port for mirroring"); - if (!priv->config.hca_attr.reg_c_preserve && + if (!priv->sh->cdev->config.hca_attr.reg_c_preserve && priv->representor_id != UINT16_MAX) *fdb_mirror_limit = 1; } /* Continue validation for Xcap actions.*/ if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && - (queue_index == 0xFFFF || - mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) { if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, @@ -5832,7 +5837,8 @@ flow_dv_modify_hdr_resource_register flow_dv_modify_match_cb, flow_dv_modify_remove_cb, flow_dv_modify_clone_cb, - flow_dv_modify_clone_free_cb); + flow_dv_modify_clone_free_cb, + error); if (unlikely(!modify_cmds)) return -rte_errno; resource->root = !dev_flow->dv.group; @@ -6185,7 +6191,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age) age ? 
MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN; uint32_t cnt_idx; - if (!priv->sh->devx) { + if (!priv->sh->cdev->config.devx) { rte_errno = ENOTSUP; return 0; } @@ -6508,7 +6514,7 @@ flow_dv_mtr_alloc(struct rte_eth_dev *dev) struct mlx5_aso_mtr_pool *pool; uint32_t mtr_idx = 0; - if (!priv->sh->devx) { + if (!priv->sh->cdev->config.devx) { rte_errno = ENOTSUP; return 0; } @@ -6604,16 +6610,11 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, NULL, "priority out of range"); if (attributes->transfer) { - if (!priv->config.dv_esw_en) + if (!priv->sh->config.dv_esw_en) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "E-Switch dr is not supported"); - if (!(priv->representor || priv->master)) - return rte_flow_error_set - (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "E-Switch configuration can only be" - " done by a master or a representor device"); if (attributes->egress) return rte_flow_error_set (error, ENOTSUP, @@ -6697,7 +6698,7 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev, const struct rte_flow_item_integrity *spec = (typeof(spec)) integrity_item->spec; - if (!priv->config.hca_attr.pkt_integrity_match) + if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, integrity_item, @@ -6844,7 +6845,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, bool external, int hairpin, struct rte_flow_error *error) { int ret; - uint64_t action_flags = 0; + uint64_t aso_mask, action_flags = 0; uint64_t item_flags = 0; uint64_t last_item = 0; uint8_t next_protocol = 0xff; @@ -6894,7 +6895,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, }, }; struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *dev_conf = &priv->config; + struct mlx5_sh_config *dev_conf = &priv->sh->config; uint16_t queue_index = 0xFFFF; const struct rte_flow_item_vlan *vlan_m = NULL; uint32_t rw_act_num = 0; @@ -6911,14 +6912,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item *integrity_items[2] = {NULL, NULL}; const struct rte_flow_item *port_id_item = NULL; bool def_policy = false; + bool shared_count = false; uint16_t udp_dport = 0; + uint32_t tag_id = 0; + const struct rte_flow_action_age *non_shared_age = NULL; + const struct rte_flow_action_count *count = NULL; if (items == NULL) return -1; tunnel = is_tunnel_offload_active(dev) ? 
mlx5_get_tof(items, actions, &tof_rule_type) : NULL; if (tunnel) { - if (!priv->config.dv_flow_en) + if (!dev_conf->dv_flow_en) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -6951,6 +6956,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, switch (type) { case RTE_FLOW_ITEM_TYPE_VOID: break; + case RTE_FLOW_ITEM_TYPE_ESP: + ret = mlx5_flow_validate_item_esp(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_ITEM_ESP; + break; case RTE_FLOW_ITEM_TYPE_PORT_ID: ret = flow_dv_validate_item_port_id (dev, items, attr, item_flags, error); @@ -7112,6 +7125,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, gre_item = items; last_item = MLX5_FLOW_LAYER_GRE; break; + case RTE_FLOW_ITEM_TYPE_GRE_OPTION: + ret = mlx5_flow_validate_item_gre_option(dev, items, item_flags, + attr, gre_item, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_GRE; + break; case RTE_FLOW_ITEM_TYPE_NVGRE: ret = mlx5_flow_validate_item_nvgre(items, item_flags, next_protocol, @@ -7209,8 +7229,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; last_item = MLX5_FLOW_ITEM_TAG; break; - case MLX5_RTE_FLOW_ITEM_TYPE_TAG: case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: + last_item = MLX5_FLOW_ITEM_TX_QUEUE; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TAG: break; case RTE_FLOW_ITEM_TYPE_GTP: ret = flow_dv_validate_item_gtp(dev, items, item_flags, @@ -7281,7 +7303,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { int type = actions->type; - bool shared_count = false; if (!mlx5_flow_os_action_supported(type)) return rte_flow_error_set(error, ENOTSUP, @@ -7380,6 +7401,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ++actions_n; if (action_flags & MLX5_FLOW_ACTION_SAMPLE) modify_after_mirror = 1; + tag_id = ((const struct rte_flow_action_set_tag *) + actions->conf)->index; action_flags |= MLX5_FLOW_ACTION_SET_TAG; rw_act_num += MLX5_ACT_NUM_SET_TAG; break; @@ -7438,9 +7461,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_dv_validate_action_count(dev, shared_count, action_flags, - error); + attr, error); if (ret < 0) return ret; + count = actions->conf; action_flags |= MLX5_FLOW_ACTION_COUNT; ++actions_n; break; @@ -7746,6 +7770,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ++actions_n; break; case RTE_FLOW_ACTION_TYPE_AGE: + non_shared_age = actions->conf; ret = flow_dv_validate_action_age(action_flags, actions, dev, error); @@ -7753,15 +7778,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; /* * Validate the regular AGE action (using counter) - * mutual exclusion with share counter actions. + * mutual exclusion with indirect counter actions. 
*/ - if (!priv->sh->flow_hit_aso_en) { + if (!flow_hit_aso_supported(priv->sh, attr)) { if (shared_count) return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "old age and shared count combination is not supported"); + "old age and indirect count combination is not supported"); if (sample_count) return rte_flow_error_set (error, EINVAL, @@ -7814,6 +7839,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret < 0) return ret; + if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) && + tag_id == 0 && priv->mtr_color_reg == REG_NON) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "sample after tag action causes metadata tag index 0 corruption"); action_flags |= MLX5_FLOW_ACTION_SAMPLE; ++actions_n; break; @@ -7960,8 +7990,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, */ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS | MLX5_FLOW_VLAN_ACTIONS)) && - (queue_index == 0xFFFF || - mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN || + (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index) || ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL && conf->tx_explicit != 0))) { if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == @@ -7970,6 +7999,28 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "encap and decap " "combination aren't supported"); + /* Push VLAN is not supported in ingress except for NICs newer than CX5. */ + if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) { + struct mlx5_dev_ctx_shared *sh = priv->sh; + bool direction_error = false; + + if (attr->transfer) { + bool fdb_tx = priv->representor_id != UINT16_MAX; + bool is_cx5 = sh->steering_format_version == + MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5; + + if (!fdb_tx && is_cx5) + direction_error = true; + } else if (attr->ingress) { + direction_error = true; + } + if (direction_error) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "push VLAN action not supported " + "for ingress"); + } if (!attr->transfer && attr->ingress) { if (action_flags & MLX5_FLOW_ACTION_ENCAP) return rte_flow_error_set @@ -7977,12 +8028,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "encap is not supported" " for ingress traffic"); - else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) - return rte_flow_error_set - (error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "push VLAN action not " - "supported for ingress"); else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) == MLX5_FLOW_VLAN_ACTIONS) return rte_flow_error_set @@ -8022,6 +8067,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, "cannot be done before meter action"); } } + /* + * Only support one ASO action in a single flow rule. + * non-shared AGE + counter will fallback to use HW counter, no ASO hit object. + * Group 0 uses HW counter for AGE too even if no counter action. 
+ */ + aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 | + (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 | + (action_flags & MLX5_FLOW_ACTION_AGE && + !(non_shared_age && count) && + (attr->group || (attr->transfer && priv->fdb_def_rule)) && + priv->sh->flow_hit_aso_en); + if (__builtin_popcountl(aso_mask) > 1) + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "unsupported combining AGE, METER, CT ASO actions in a single rule"); /* * Hairpin flow will add one more TAG action in TX implicit mode. * In TX explicit mode, there will be no hairpin flow ID. @@ -8045,6 +8104,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "sample before modify action is not supported"); + /* + * Validation the NIC Egress flow on representor, except implicit + * hairpin default egress flow with TX_QUEUE item, other flows not + * work due to metadata regC0 mismatch. + */ + if ((!attr->transfer && attr->egress) && priv->representor && + !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "NIC egress rules on representors" + " is not supported"); return 0; } @@ -8667,6 +8738,58 @@ flow_dv_translate_item_tcp(void *matcher, void *key, (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags)); } +/** + * Add ESP item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_esp(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_esp *esp_m = item->mask; + const struct rte_flow_item_esp *esp_v = item->spec; + void *headers_m; + void *headers_v; + char *spi_m; + char *spi_v; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP); + if (!esp_v) + return; + if (!esp_m) + esp_m = &rte_flow_item_esp_mask; + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + if (inner) { + spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi); + spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi); + } else { + spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi); + spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi); + } + *(uint32_t *)spi_m = esp_m->hdr.spi; + *(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi; +} + /** * Add UDP item to matcher and to the value. * @@ -8832,6 +8955,110 @@ flow_dv_translate_item_gre(void *matcher, void *key, protocol_m & protocol_v); } +/** + * Add GRE optional items to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] gre_item + * Pointer to gre_item. + * @param[in] pattern_flags + * Accumulated pattern flags. 
+ */ +static void +flow_dv_translate_item_gre_option(void *matcher, void *key, + const struct rte_flow_item *item, + const struct rte_flow_item *gre_item, + uint64_t pattern_flags) +{ + const struct rte_flow_item_gre_opt *option_m = item->mask; + const struct rte_flow_item_gre_opt *option_v = item->spec; + const struct rte_flow_item_gre *gre_m = gre_item->mask; + const struct rte_flow_item_gre *gre_v = gre_item->spec; + static const struct rte_flow_item_gre empty_gre = {0}; + struct rte_flow_item gre_key_item; + uint16_t c_rsvd0_ver_m, c_rsvd0_ver_v; + uint16_t protocol_m, protocol_v; + void *misc5_m; + void *misc5_v; + + /* + * If only match key field, keep using misc for matching. + * If need to match checksum or sequence, using misc5 and do + * not need using misc. + */ + if (!(option_m->sequence.sequence || + option_m->checksum_rsvd.checksum)) { + flow_dv_translate_item_gre(matcher, key, gre_item, + pattern_flags); + gre_key_item.spec = &option_v->key.key; + gre_key_item.mask = &option_m->key.key; + flow_dv_translate_item_gre_key(matcher, key, &gre_key_item); + return; + } + if (!gre_v) { + gre_v = &empty_gre; + gre_m = &empty_gre; + } else { + if (!gre_m) + gre_m = &rte_flow_item_gre_mask; + } + protocol_v = gre_v->protocol; + protocol_m = gre_m->protocol; + if (!protocol_m) { + /* Force next protocol to prevent matchers duplication */ + uint16_t ether_type = + mlx5_translate_tunnel_etypes(pattern_flags); + if (ether_type) { + protocol_v = rte_be_to_cpu_16(ether_type); + protocol_m = UINT16_MAX; + } + } + c_rsvd0_ver_v = gre_v->c_rsvd0_ver; + c_rsvd0_ver_m = gre_m->c_rsvd0_ver; + if (option_m->sequence.sequence) { + c_rsvd0_ver_v |= RTE_BE16(0x1000); + c_rsvd0_ver_m |= RTE_BE16(0x1000); + } + if (option_m->key.key) { + c_rsvd0_ver_v |= RTE_BE16(0x2000); + c_rsvd0_ver_m |= RTE_BE16(0x2000); + } + if (option_m->checksum_rsvd.checksum) { + c_rsvd0_ver_v |= RTE_BE16(0x8000); + c_rsvd0_ver_m |= RTE_BE16(0x8000); + } + /* + * Hardware parses GRE optional field into the fixed location, + * do not need to adjust the tunnel dword indices. + */ + misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5); + misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5); + MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0, + rte_be_to_cpu_32((c_rsvd0_ver_v | protocol_v << 16) & + (c_rsvd0_ver_m | protocol_m << 16))); + MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_0, + rte_be_to_cpu_32(c_rsvd0_ver_m | protocol_m << 16)); + MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1, + rte_be_to_cpu_32(option_v->checksum_rsvd.checksum & + option_m->checksum_rsvd.checksum)); + MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_1, + rte_be_to_cpu_32(option_m->checksum_rsvd.checksum)); + MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_2, + rte_be_to_cpu_32(option_v->key.key & option_m->key.key)); + MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_2, + rte_be_to_cpu_32(option_m->key.key)); + MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_3, + rte_be_to_cpu_32(option_v->sequence.sequence & + option_m->sequence.sequence)); + MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_3, + rte_be_to_cpu_32(option_m->sequence.sequence)); +} + /** * Add NVGRE item to matcher and to the value. * @@ -10170,7 +10397,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key, /* Don't count both inner and outer flex items in one rule. 
*/ if (mlx5_flex_acquire_index(dev, spec->handle, true) != index) MLX5_ASSERT(false); - dev_flow->handle->flex_item |= RTE_BIT32(index); + dev_flow->handle->flex_item |= (uint8_t)RTE_BIT32(index); } mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner); } @@ -10514,7 +10741,8 @@ flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) tbl_data->tunnel->tunnel_id : 0, tbl_data->group_id); } - mlx5_list_destroy(tbl_data->matchers); + if (tbl_data->matchers) + mlx5_list_destroy(tbl_data->matchers); mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); } @@ -10769,7 +10997,8 @@ flow_dv_tag_resource_register flow_dv_tag_match_cb, flow_dv_tag_remove_cb, flow_dv_tag_clone_cb, - flow_dv_tag_clone_free_cb); + flow_dv_tag_clone_free_cb, + error); if (unlikely(!tag_table)) return -rte_errno; entry = mlx5_hlist_register(tag_table, tag_be24, &ctx); @@ -10950,10 +11179,8 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, { const struct mlx5_rte_flow_item_tx_queue *queue_m; const struct mlx5_rte_flow_item_tx_queue *queue_v; - void *misc_m = - MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); - void *misc_v = - MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); struct mlx5_txq_ctrl *txq; uint32_t queue, mask; @@ -10964,7 +11191,7 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, txq = mlx5_txq_get(dev, queue_v->queue); if (!txq) return; - if (txq->type == MLX5_TXQ_TYPE_HAIRPIN) + if (txq->is_hairpin) queue = txq->obj->sq->id; else queue = txq->obj->sq_obj.sq->id; @@ -10977,78 +11204,89 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, /** * Set the hash fields according to the @p flow information. * - * @param[in] dev_flow - * Pointer to the mlx5_flow. + * @param[in] item_flags + * The match pattern item flags. * @param[in] rss_desc * Pointer to the mlx5_flow_rss_desc. + * @param[out] hash_fields + * Pointer to the RSS hash fields. 
*/ -static void -flow_dv_hashfields_set(struct mlx5_flow *dev_flow, - struct mlx5_flow_rss_desc *rss_desc) +void +flow_dv_hashfields_set(uint64_t item_flags, + struct mlx5_flow_rss_desc *rss_desc, + uint64_t *hash_fields) { - uint64_t items = dev_flow->handle->layers; + uint64_t items = item_flags; + uint64_t fields = 0; int rss_inner = 0; uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types); - dev_flow->hash_fields = 0; + *hash_fields = 0; #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT if (rss_desc->level >= 2) rss_inner = 1; #endif if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) || - (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) { + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) || + !items) { if (rss_types & MLX5_IPV4_LAYER_TYPES) { if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY) - dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4; + fields |= IBV_RX_HASH_SRC_IPV4; else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY) - dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4; + fields |= IBV_RX_HASH_DST_IPV4; else - dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH; + fields |= MLX5_IPV4_IBV_RX_HASH; } } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || - (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) { + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) || + !items) { if (rss_types & MLX5_IPV6_LAYER_TYPES) { if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY) - dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6; + fields |= IBV_RX_HASH_SRC_IPV6; else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY) - dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6; + fields |= IBV_RX_HASH_DST_IPV6; else - dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH; + fields |= MLX5_IPV6_IBV_RX_HASH; } } - if (dev_flow->hash_fields == 0) + if (items & MLX5_FLOW_ITEM_ESP) { + if (rss_types & RTE_ETH_RSS_ESP) + fields |= IBV_RX_HASH_IPSEC_SPI; + } + if ((fields & ~IBV_RX_HASH_IPSEC_SPI) == 0) { + *hash_fields = fields; /* * There is no match between the RSS types and the * L3 protocol (IPv4/IPv6) defined in the flow rule. 
*/ return; + } if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) || - (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) { + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) || + !items) { if (rss_types & RTE_ETH_RSS_UDP) { if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY) - dev_flow->hash_fields |= - IBV_RX_HASH_SRC_PORT_UDP; + fields |= IBV_RX_HASH_SRC_PORT_UDP; else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY) - dev_flow->hash_fields |= - IBV_RX_HASH_DST_PORT_UDP; + fields |= IBV_RX_HASH_DST_PORT_UDP; else - dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH; + fields |= MLX5_UDP_IBV_RX_HASH; } } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) || - (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) { + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) || + !items) { if (rss_types & RTE_ETH_RSS_TCP) { if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY) - dev_flow->hash_fields |= - IBV_RX_HASH_SRC_PORT_TCP; + fields |= IBV_RX_HASH_SRC_PORT_TCP; else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY) - dev_flow->hash_fields |= - IBV_RX_HASH_DST_PORT_TCP; + fields |= IBV_RX_HASH_DST_PORT_TCP; else - dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH; + fields |= MLX5_TCP_IBV_RX_HASH; } } if (rss_inner) - dev_flow->hash_fields |= IBV_RX_HASH_INNER; + fields |= IBV_RX_HASH_INNER; + *hash_fields = fields; } /** @@ -11072,8 +11310,8 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, struct mlx5_flow_rss_desc *rss_desc, uint32_t *hrxq_idx) { - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *dh = dev_flow->handle; + uint32_t shared_rss = rss_desc->shared_rss; struct mlx5_hrxq *hrxq; MLX5_ASSERT(rss_desc->queue_num); @@ -11083,11 +11321,9 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, rss_desc->shared_rss = 0; if (rss_desc->hash_fields == 0) rss_desc->queue_num = 1; - *hrxq_idx = mlx5_hrxq_get(dev, rss_desc); - if (!*hrxq_idx) - return NULL; - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], - *hrxq_idx); + hrxq = mlx5_hrxq_get(dev, rss_desc); + *hrxq_idx = hrxq ? hrxq->idx : 0; + rss_desc->shared_rss = shared_rss; return hrxq; } @@ -11633,7 +11869,9 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, * rss->level and rss.types should be set in advance * when expanding items for RSS. 
*/ - flow_dv_hashfields_set(dev_flow, rss_desc); + flow_dv_hashfields_set(dev_flow->handle->layers, + rss_desc, + &dev_flow->hash_fields); hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx); if (!hrxq) @@ -11660,7 +11898,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, (((const struct rte_flow_action_mark *) (sub_actions->conf))->id); - dev_flow->handle->mark = 1; + wks->mark = 1; pre_rix = dev_flow->handle->dvh.rix_tag; /* Save the mark resource before sample */ pre_r = dev_flow->dv.tag_resource; @@ -12525,7 +12763,7 @@ flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error) uint32_t ct_idx; MLX5_ASSERT(mng); - if (!priv->sh->devx) { + if (!priv->sh->cdev->config.devx) { rte_errno = ENOTSUP; return 0; } @@ -12654,7 +12892,7 @@ flow_dv_translate(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *dev_conf = &priv->config; + struct mlx5_sh_config *dev_conf = &priv->sh->config; struct rte_flow *flow = dev_flow->flow; struct mlx5_flow_handle *handle = dev_flow->handle; struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); @@ -12708,6 +12946,7 @@ flow_dv_translate(struct rte_eth_dev *dev, }; const struct rte_flow_item *integrity_items[2] = {NULL, NULL}; const struct rte_flow_item *tunnel_item = NULL; + const struct rte_flow_item *gre_item = NULL; if (!wks) return rte_flow_error_set(error, ENOMEM, @@ -12820,7 +13059,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_FLAG: action_flags |= MLX5_FLOW_ACTION_FLAG; - dev_flow->handle->mark = 1; + wks->mark = 1; if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { struct rte_flow_action_mark mark = { .id = MLX5_FLOW_MARK_DEFAULT, @@ -12849,7 +13088,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; - dev_flow->handle->mark = 1; + wks->mark = 1; if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { const struct rte_flow_action_mark *mark = (const struct rte_flow_action_mark *) @@ -12963,7 +13202,7 @@ flow_dv_translate(struct rte_eth_dev *dev, } break; case RTE_FLOW_ACTION_TYPE_COUNT: - if (!priv->sh->devx) { + if (!priv->sh->cdev->config.devx) { return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -13306,8 +13545,7 @@ flow_dv_translate(struct rte_eth_dev *dev, */ if (action_flags & MLX5_FLOW_ACTION_AGE) { if ((non_shared_age && count) || - !(priv->sh->flow_hit_aso_en && - (attr->group || attr->transfer))) { + !flow_hit_aso_supported(priv->sh, attr)) { /* Creates age by counters. 
*/ cnt_act = flow_dv_prepare_counter (dev, dev_flow, @@ -13367,6 +13605,11 @@ flow_dv_translate(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "item not supported"); switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ESP: + flow_dv_translate_item_esp(match_mask, match_value, + items, tunnel); + last_item = MLX5_FLOW_ITEM_ESP; + break; case RTE_FLOW_ITEM_TYPE_PORT_ID: flow_dv_translate_item_port_id (dev, match_mask, match_value, items, attr); @@ -13480,12 +13723,18 @@ flow_dv_translate(struct rte_eth_dev *dev, matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; tunnel_item = items; + gre_item = items; break; case RTE_FLOW_ITEM_TYPE_GRE_KEY: flow_dv_translate_item_gre_key(match_mask, match_value, items); last_item = MLX5_FLOW_LAYER_GRE_KEY; break; + case RTE_FLOW_ITEM_TYPE_GRE_OPTION: + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); + last_item = MLX5_FLOW_LAYER_GRE; + tunnel_item = items; + break; case RTE_FLOW_ITEM_TYPE_NVGRE: matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; @@ -13538,11 +13787,13 @@ flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_ICMP: flow_dv_translate_item_icmp(match_mask, match_value, items, tunnel); + matcher.priority = MLX5_PRIORITY_MAP_L4; last_item = MLX5_FLOW_LAYER_ICMP; break; case RTE_FLOW_ITEM_TYPE_ICMP6: flow_dv_translate_item_icmp6(match_mask, match_value, items, tunnel); + matcher.priority = MLX5_PRIORITY_MAP_L4; last_item = MLX5_FLOW_LAYER_ICMP6; break; case RTE_FLOW_ITEM_TYPE_TAG: @@ -13617,12 +13868,13 @@ flow_dv_translate(struct rte_eth_dev *dev, /* * When E-Switch mode is enabled, we have two cases where we need to * set the source port manually. - * The first one, is in case of Nic steering rule, and the second is - * E-Switch rule where no port_id item was found. In both cases - * the source port is set according the current port in use. + * The first one, is in case of NIC ingress steering rule, and the + * second is E-Switch rule where no port_id item was found. + * In both cases the source port is set according the current port + * in use. */ - if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && - (priv->representor || priv->master)) { + if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && priv->sh->esw_mode && + !(attr->egress && !attr->transfer)) { if (flow_dv_translate_item_port_id(dev, match_mask, match_value, NULL, attr)) return -rte_errno; @@ -13645,6 +13897,9 @@ flow_dv_translate(struct rte_eth_dev *dev, else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE) flow_dv_translate_item_nvgre(match_mask, match_value, tunnel_item, item_flags); + else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION) + flow_dv_translate_item_gre_option(match_mask, match_value, + tunnel_item, gre_item, item_flags); else MLX5_ASSERT(false); } @@ -13658,7 +13913,9 @@ flow_dv_translate(struct rte_eth_dev *dev, */ handle->layers |= item_flags; if (action_flags & MLX5_FLOW_ACTION_RSS) - flow_dv_hashfields_set(dev_flow, rss_desc); + flow_dv_hashfields_set(dev_flow->handle->layers, + rss_desc, + &dev_flow->hash_fields); /* If has RSS action in the sample action, the Sample/Mirror resource * should be registered after the hash filed be update. 
*/ @@ -13822,6 +14079,15 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action, case MLX5_RSS_HASH_NONE: hrxqs[6] = hrxq_idx; return 0; + case MLX5_RSS_HASH_IPV4_ESP: + hrxqs[7] = hrxq_idx; + return 0; + case MLX5_RSS_HASH_IPV6_ESP: + hrxqs[8] = hrxq_idx; + return 0; + case MLX5_RSS_HASH_ESP_SPI: + hrxqs[9] = hrxq_idx; + return 0; default: return -1; } @@ -13843,9 +14109,9 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action, * @return * Valid hash RX queue index, otherwise 0. */ -static uint32_t -__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, - const uint64_t hash_fields) +uint32_t +flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, + const uint64_t hash_fields) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_shared_action_rss *shared_rss = @@ -13891,6 +14157,12 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, return hrxqs[5]; case MLX5_RSS_HASH_NONE: return hrxqs[6]; + case MLX5_RSS_HASH_IPV4_ESP: + return hrxqs[7]; + case MLX5_RSS_HASH_IPV6_ESP: + return hrxqs[8]; + case MLX5_RSS_HASH_ESP_SPI: + return hrxqs[9]; default: return 0; } @@ -13973,7 +14245,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, struct mlx5_hrxq *hrxq = NULL; uint32_t hrxq_idx; - hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, + hrxq_idx = flow_dv_action_rss_hrxq_lookup(dev, rss_desc->shared_rss, dev_flow->hash_fields); if (hrxq_idx) @@ -14009,7 +14281,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - (!priv->config.allow_duplicate_pattern && + (!priv->sh->config.allow_duplicate_pattern && errno == EEXIST) ? "duplicating pattern is not allowed" : "hardware refuses to create flow"); @@ -14508,7 +14780,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) int index = rte_bsf32(dev_handle->flex_item); mlx5_flex_release_index(dev, index); - dev_handle->flex_item &= ~RTE_BIT32(index); + dev_handle->flex_item &= ~(uint8_t)RTE_BIT32(index); } if (dev_handle->dvh.matcher) flow_dv_matcher_release(dev, dev_handle); @@ -14607,19 +14879,19 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share * same slot in mlx5_rss_hash_fields. * - * @param[in] rss - * Pointer to the shared action RSS conf. + * @param[in] orig_rss_types + * RSS type as provided in shared RSS action. * @param[in, out] hash_field * hash_field variable needed to be adjusted. 
* * @return * void */ -static void -__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss, - uint64_t *hash_field) +void +flow_dv_action_rss_l34_hash_adjust(uint64_t orig_rss_types, + uint64_t *hash_field) { - uint64_t rss_types = rss->origin.types; + uint64_t rss_types = rte_eth_rss_hf_refine(orig_rss_types); switch (*hash_field & ~IBV_RX_HASH_INNER) { case MLX5_RSS_HASH_IPV4: @@ -14699,16 +14971,20 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, struct mlx5_shared_action_rss *shared_rss, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_rss_desc rss_desc = { 0 }; size_t i; int err; - if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl, - !!dev->data->dev_started)) { + shared_rss->ind_tbl = mlx5_ind_table_obj_new + (dev, shared_rss->origin.queue, + shared_rss->origin.queue_num, + true, + !!dev->data->dev_started); + if (!shared_rss->ind_tbl) return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot setup indirection table"); - } memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN); rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN; rss_desc.const_q = shared_rss->origin.queue; @@ -14716,20 +14992,23 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, /* Set non-zero value to indicate a shared RSS. */ rss_desc.shared_rss = action_idx; rss_desc.ind_tbl = shared_rss->ind_tbl; + if (priv->sh->config.dv_flow_en == 2) + rss_desc.hws_flags = MLX5DR_ACTION_FLAG_HWS_RX; for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { - uint32_t hrxq_idx; + struct mlx5_hrxq *hrxq; uint64_t hash_fields = mlx5_rss_hash_fields[i]; int tunnel = 0; - __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields); + flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types, + &hash_fields); if (shared_rss->origin.level > 1) { hash_fields |= IBV_RX_HASH_INNER; tunnel = 1; } rss_desc.tunnel = tunnel; rss_desc.hash_fields = hash_fields; - hrxq_idx = mlx5_hrxq_get(dev, &rss_desc); - if (!hrxq_idx) { + hrxq = mlx5_hrxq_get(dev, &rss_desc); + if (!hrxq) { rte_flow_error_set (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -14737,14 +15016,14 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, goto error_hrxq_new; } err = __flow_dv_action_rss_hrxq_set - (shared_rss, hash_fields, hrxq_idx); + (shared_rss, hash_fields, hrxq->idx); MLX5_ASSERT(!err); } return 0; error_hrxq_new: err = rte_errno; __flow_dv_action_rss_hrxqs_release(dev, shared_rss); - if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true)) + if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true)) shared_rss->ind_tbl = NULL; rte_errno = err; return -rte_errno; @@ -14775,18 +15054,14 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_shared_action_rss *shared_rss = NULL; - void *queue = NULL; struct rte_flow_action_rss *origin; const uint8_t *rss_key; - uint32_t queue_size = rss->queue_num * sizeof(uint16_t); uint32_t idx; RTE_SET_USED(conf); - queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), - 0, SOCKET_ID_ANY); shared_rss = mlx5_ipool_zmalloc (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx); - if (!shared_rss || !queue) { + if (!shared_rss) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); @@ -14798,18 +15073,6 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, "rss action number out of range"); goto error_rss_init; } - shared_rss->ind_tbl = 
mlx5_malloc(MLX5_MEM_ZERO, - sizeof(*shared_rss->ind_tbl), - 0, SOCKET_ID_ANY); - if (!shared_rss->ind_tbl) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - goto error_rss_init; - } - memcpy(queue, rss->queue, queue_size); - shared_rss->ind_tbl->queues = queue; - shared_rss->ind_tbl->queues_n = rss->queue_num; origin = &shared_rss->origin; origin->func = rss->func; origin->level = rss->level; @@ -14820,10 +15083,12 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN); origin->key = &shared_rss->key[0]; origin->key_len = MLX5_RSS_HASH_KEY_LEN; - origin->queue = queue; + origin->queue = rss->queue; origin->queue_num = rss->queue_num; if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error)) goto error_rss_init; + /* Update queue with indirect table queue memoyr. */ + origin->queue = shared_rss->ind_tbl->queues; rte_spinlock_init(&shared_rss->action_rss_sl); __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED); rte_spinlock_lock(&priv->shared_act_sl); @@ -14834,12 +15099,11 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, error_rss_init: if (shared_rss) { if (shared_rss->ind_tbl) - mlx5_free(shared_rss->ind_tbl); + mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, + !!dev->data->dev_started); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); } - if (queue) - mlx5_free(queue); return 0; } @@ -14867,7 +15131,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); uint32_t old_refcnt = 1; int remaining; - uint16_t *queue = NULL; if (!shared_rss) return rte_flow_error_set(error, EINVAL, @@ -14886,8 +15149,7 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "shared rss hrxq has references"); - queue = shared_rss->ind_tbl->queues; - remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, + remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, !!dev->data->dev_started); if (remaining) return rte_flow_error_set(error, EBUSY, @@ -14895,7 +15157,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, NULL, "shared rss indirection table has" " references"); - mlx5_free(queue); rte_spinlock_lock(&priv->shared_act_sl); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &priv->rss_shared_actions, idx, shared_rss, next); @@ -14924,7 +15185,7 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, * A valid shared action handle in case of success, NULL otherwise and * rte_errno is set. */ -static struct rte_flow_action_handle * +struct rte_flow_action_handle * flow_dv_action_create(struct rte_eth_dev *dev, const struct rte_flow_indir_action_conf *conf, const struct rte_flow_action *action, @@ -14994,7 +15255,7 @@ flow_dv_action_create(struct rte_eth_dev *dev, * @return * 0 on success, otherwise negative errno value. 
*/ -static int +int flow_dv_action_destroy(struct rte_eth_dev *dev, struct rte_flow_action_handle *handle, struct rte_flow_error *error) @@ -15074,7 +15335,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); int ret = 0; void *queue = NULL; - uint16_t *queue_old = NULL; + void *queue_i = NULL; uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t); bool dev_started = !!dev->data->dev_started; @@ -15097,22 +15358,23 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, memcpy(queue, action_conf->queue, queue_size); MLX5_ASSERT(shared_rss->ind_tbl); rte_spinlock_lock(&shared_rss->action_rss_sl); - queue_old = shared_rss->ind_tbl->queues; + queue_i = shared_rss->ind_tbl->queues; ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl, queue, action_conf->queue_num, true /* standalone */, dev_started /* ref_new_qs */, dev_started /* deref_old_qs */); if (ret) { - mlx5_free(queue); ret = rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "cannot update indirection table"); } else { - mlx5_free(queue_old); - shared_rss->origin.queue = queue; + /* Restore the queue to indirect table internal queue. */ + memcpy(queue_i, queue, queue_size); + shared_rss->ind_tbl->queues = queue_i; shared_rss->origin.queue_num = action_conf->queue_num; } + mlx5_free(queue); rte_spinlock_unlock(&shared_rss->action_rss_sl); return ret; } @@ -15203,7 +15465,7 @@ __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx, * @return * 0 on success, otherwise negative errno value. */ -static int +int flow_dv_action_update(struct rte_eth_dev *dev, struct rte_flow_action_handle *handle, const void *update, @@ -15455,7 +15717,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, NULL, "cannot create policy " "mark action for this color"); - dev_flow.handle->mark = 1; if (flow_dv_tag_resource_register(dev, tag_be, &dev_flow, &flow_err)) return -rte_mtr_error_set(error, @@ -15467,6 +15728,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, act_cnt->rix_mark = dev_flow.handle->dvh.rix_tag; action_flags |= MLX5_FLOW_ACTION_MARK; + mtr_policy->mark = 1; break; } case RTE_FLOW_ACTION_TYPE_SET_TAG: @@ -15750,6 +16012,8 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, act_cnt->next_sub_policy = NULL; mtr_policy->is_hierarchy = 1; mtr_policy->dev = next_policy->dev; + if (next_policy->mark) + mtr_policy->mark = 1; action_flags |= MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY; break; @@ -15835,14 +16099,14 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev, * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ -int +static int flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_query_count *qc = data; - if (!priv->sh->devx) + if (!priv->sh->cdev->config.devx) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -15873,49 +16137,7 @@ flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data, "counters are not available"); } - -/** - * Query counter's action pointer for a DV flow rule via DevX. - * - * @param[in] dev - * Pointer to Ethernet device. - * @param[in] cnt_idx - * Index to the flow counter. - * @param[out] action_ptr - * Action pointer for counter. - * @param[out] error - * Perform verbose error reporting if not NULL. 
- * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ int -flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx, - void **action_ptr, struct rte_flow_error *error) -{ - struct mlx5_priv *priv = dev->data->dev_private; - - if (!priv->sh->devx || !action_ptr) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "counters are not supported"); - - if (cnt_idx) { - struct mlx5_flow_counter *cnt = NULL; - cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL); - if (cnt) { - *action_ptr = cnt->action; - return 0; - } - } - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "counters are not available"); -} - -static int flow_dv_action_query(struct rte_eth_dev *dev, const struct rte_flow_action_handle *handle, void *data, struct rte_flow_error *error) @@ -16077,7 +16299,7 @@ flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; int i; - if (!fm || !priv->config.dv_flow_en) + if (!fm || !priv->sh->config.dv_flow_en) return; for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) { if (fm->drop_rule[i]) { @@ -16180,7 +16402,7 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; uint8_t misc_mask; - if (match_src_port && (priv->representor || priv->master)) { + if (match_src_port && priv->sh->esw_mode) { if (flow_dv_translate_item_port_id(dev, matcher.buf, value.buf, item, attr)) { DRV_LOG(ERR, "Failed to create meter policy%d flow's" @@ -16232,7 +16454,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1; - if (match_src_port && (priv->representor || priv->master)) { + if (match_src_port && priv->sh->esw_mode) { if (flow_dv_translate_item_port_id(dev, matcher.mask.buf, value.buf, item, attr)) { DRV_LOG(ERR, "Failed to register meter policy%d matcher" @@ -16683,7 +16905,8 @@ flow_dv_create_def_policy(struct rte_eth_dev *dev) /* Non-termination policy table. */ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) { - if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER) + if (!priv->sh->config.dv_esw_en && + i == MLX5_MTR_DOMAIN_TRANSFER) continue; if (__flow_dv_create_domain_def_policy(dev, i)) { DRV_LOG(ERR, "Failed to create default policy"); @@ -16880,16 +17103,19 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, struct mlx5_meter_policy_action_container *act_cnt; uint32_t domain = MLX5_MTR_DOMAIN_INGRESS; uint16_t sub_policy_num; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); rte_spinlock_lock(&mtr_policy->sl); for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { if (!rss_desc[i]) continue; - hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]); - if (!hrxq_idx[i]) { + hrxq = mlx5_hrxq_get(dev, rss_desc[i]); + if (!hrxq) { rte_spinlock_unlock(&mtr_policy->sl); return NULL; } + hrxq_idx[i] = hrxq->idx; } sub_policy_num = (mtr_policy->sub_policy_num >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) & @@ -16914,7 +17140,8 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, } } /* Create sub policy. */ - if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) { + if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_GREEN] && + !mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_YELLOW]) { /* Reuse the first pre-allocated sub_policy. 
*/ sub_policy = mtr_policy->sub_policys[domain][0]; sub_policy_idx = sub_policy->idx; @@ -16954,7 +17181,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, if (act_cnt->rix_mark || act_cnt->modify_hdr) { memset(&dh, 0, sizeof(struct mlx5_flow_handle)); if (act_cnt->rix_mark) - dh.mark = 1; + wks->mark = 1; dh.fate_action = MLX5_FLOW_FATE_QUEUE; dh.rix_hrxq = hrxq_idx[i]; flow_drv_rxq_flags_set(dev, &dh); @@ -17490,20 +17717,23 @@ err: */ static int flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear, - uint64_t *pkts, uint64_t *bytes) + uint64_t *pkts, uint64_t *bytes, void **action) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter *cnt; uint64_t inn_pkts, inn_bytes; int ret; - if (!priv->sh->devx) + if (!priv->sh->cdev->config.devx) return -1; ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes); if (ret) return -1; cnt = flow_dv_counter_get_by_idx(dev, counter, NULL); + if (cnt && action) + *action = cnt->action; + *pkts = inn_pkts - cnt->hits; *bytes = inn_bytes - cnt->bytes; if (clear) { @@ -17601,7 +17831,7 @@ flow_dv_counter_allocate(struct rte_eth_dev *dev) * @return * 0 on success, otherwise negative errno value. */ -static int +int flow_dv_action_validate(struct rte_eth_dev *dev, const struct rte_flow_indir_action_conf *conf, const struct rte_flow_action *action, @@ -17635,7 +17865,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev, "Indirect age action not supported"); return flow_dv_validate_action_age(0, action, dev, err); case RTE_FLOW_ACTION_TYPE_COUNT: - return flow_dv_validate_action_count(dev, true, 0, err); + return flow_dv_validate_action_count(dev, true, 0, NULL, err); case RTE_FLOW_ACTION_TYPE_CONNTRACK: if (!priv->sh->ct_aso_en) return rte_flow_error_set(err, ENOTSUP, @@ -17792,7 +18022,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, struct rte_mtr_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *dev_conf = &priv->config; + struct mlx5_sh_config *dev_conf = &priv->sh->config; const struct rte_flow_action *act; uint64_t action_flags[RTE_COLORS] = {0}; int actions_n; @@ -17806,7 +18036,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, bool def_yellow = false; const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL}; - if (!priv->config.dv_esw_en) + if (!dev_conf->dv_esw_en) def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT; *domain_bitmap = def_domain; /* Red color could only support DROP action. */ @@ -17850,7 +18080,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, switch (act->type) { case RTE_FLOW_ACTION_TYPE_PORT_ID: case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: - if (!priv->config.dv_esw_en) + if (!dev_conf->dv_esw_en) return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_POLICY, @@ -18291,4 +18521,3 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { }; #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ -