X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=af90a7fd0ae07b4ad5d5e4027a65c5eee6920c8c;hb=8e83ba285abe4341b7666927d3fc265b35446c06;hp=9cba22ca2dae7fb5d39290ef57ded8f8ea3e1fea;hpb=295968d1740760337e16b0d7914875c5cac52850;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 9cba22ca2d..af90a7fd0a 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -287,31 +287,6 @@ struct field_modify_info modify_tcp[] = { {0, 0, 0}, }; -static const struct rte_flow_item * -mlx5_flow_find_tunnel_item(const struct rte_flow_item *item) -{ - for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { - switch (item->type) { - default: - break; - case RTE_FLOW_ITEM_TYPE_VXLAN: - case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: - case RTE_FLOW_ITEM_TYPE_GRE: - case RTE_FLOW_ITEM_TYPE_MPLS: - case RTE_FLOW_ITEM_TYPE_NVGRE: - case RTE_FLOW_ITEM_TYPE_GENEVE: - return item; - case RTE_FLOW_ITEM_TYPE_IPV4: - case RTE_FLOW_ITEM_TYPE_IPV6: - if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 || - item[1].type == RTE_FLOW_ITEM_TYPE_IPV6) - return item; - break; - } - } - return NULL; -} - static void mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, uint8_t next_protocol, uint64_t *item_flags, @@ -1408,9 +1383,13 @@ flow_dv_convert_action_modify_ipv6_dscp } static int -mlx5_flow_item_field_width(struct mlx5_priv *priv, - enum rte_flow_field_id field, int inherit) +mlx5_flow_item_field_width(struct rte_eth_dev *dev, + enum rte_flow_field_id field, int inherit, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + switch (field) { case RTE_FLOW_FIELD_START: return 32; @@ -1457,7 +1436,8 @@ mlx5_flow_item_field_width(struct mlx5_priv *priv, case RTE_FLOW_FIELD_MARK: return __builtin_popcount(priv->sh->dv_mark_mask); case RTE_FLOW_FIELD_META: - return __builtin_popcount(priv->sh->dv_meta_mask); + return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ? + __builtin_popcount(priv->sh->dv_meta_mask) : 32; case RTE_FLOW_FIELD_POINTER: case RTE_FLOW_FIELD_VALUE: return inherit < 0 ? 
0 : inherit; @@ -1490,11 +1470,11 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){2, 4, MLX5_MODI_OUT_DMAC_15_0}; if (width < 16) { - mask[idx] = rte_cpu_to_be_16(0xffff >> + mask[1] = rte_cpu_to_be_16(0xffff >> (16 - width)); width = 0; } else { - mask[idx] = RTE_BE16(0xffff); + mask[1] = RTE_BE16(0xffff); width -= 16; } if (!width) @@ -1503,13 +1483,13 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DMAC_47_16}; - mask[idx] = rte_cpu_to_be_32((0xffffffff >> - (32 - width)) << off); + mask[0] = rte_cpu_to_be_32((0xffffffff >> + (32 - width)) << off); } else { if (data->offset < 16) - info[idx++] = (struct field_modify_info){2, 4, + info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_DMAC_15_0}; - info[idx] = (struct field_modify_info){4, 0, + info[idx] = (struct field_modify_info){4, off, MLX5_MODI_OUT_DMAC_47_16}; } break; @@ -1520,11 +1500,11 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){2, 4, MLX5_MODI_OUT_SMAC_15_0}; if (width < 16) { - mask[idx] = rte_cpu_to_be_16(0xffff >> + mask[1] = rte_cpu_to_be_16(0xffff >> (16 - width)); width = 0; } else { - mask[idx] = RTE_BE16(0xffff); + mask[1] = RTE_BE16(0xffff); width -= 16; } if (!width) @@ -1533,13 +1513,13 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SMAC_47_16}; - mask[idx] = rte_cpu_to_be_32((0xffffffff >> - (32 - width)) << off); + mask[0] = rte_cpu_to_be_32((0xffffffff >> + (32 - width)) << off); } else { if (data->offset < 16) - info[idx++] = (struct field_modify_info){2, 4, + info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_SMAC_15_0}; - info[idx] = (struct field_modify_info){4, 0, + info[idx] = (struct field_modify_info){4, off, MLX5_MODI_OUT_SMAC_47_16}; } break; @@ -1602,12 +1582,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 12, MLX5_MODI_OUT_SIPV6_31_0}; if (width < 32) { - mask[idx] = + mask[3] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[3] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1618,12 +1598,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 8, MLX5_MODI_OUT_SIPV6_63_32}; if (width < 32) { - mask[idx] = + mask[2] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[2] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1634,12 +1614,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 4, MLX5_MODI_OUT_SIPV6_95_64}; if (width < 32) { - mask[idx] = + mask[1] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[1] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1648,17 +1628,16 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SIPV6_127_96}; - mask[idx] = rte_cpu_to_be_32(0xffffffff >> - (32 - width)); + mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); } else { if (data->offset < 32) - info[idx++] = (struct field_modify_info){4, 12, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SIPV6_31_0}; if (data->offset < 64) - info[idx++] = (struct field_modify_info){4, 8, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SIPV6_63_32}; if (data->offset < 96) - info[idx++] = (struct field_modify_info){4, 4, + info[idx++] = (struct field_modify_info){4, 0, 
MLX5_MODI_OUT_SIPV6_95_64}; if (data->offset < 128) info[idx++] = (struct field_modify_info){4, 0, @@ -1671,12 +1650,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 12, MLX5_MODI_OUT_DIPV6_31_0}; if (width < 32) { - mask[idx] = + mask[3] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[3] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1687,12 +1666,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 8, MLX5_MODI_OUT_DIPV6_63_32}; if (width < 32) { - mask[idx] = + mask[2] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[2] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1703,12 +1682,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 4, MLX5_MODI_OUT_DIPV6_95_64}; if (width < 32) { - mask[idx] = + mask[1] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[1] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1717,17 +1696,16 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_127_96}; - mask[idx] = rte_cpu_to_be_32(0xffffffff >> - (32 - width)); + mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); } else { if (data->offset < 32) - info[idx++] = (struct field_modify_info){4, 12, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_31_0}; if (data->offset < 64) - info[idx++] = (struct field_modify_info){4, 8, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_63_32}; if (data->offset < 96) - info[idx++] = (struct field_modify_info){4, 4, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_95_64}; if (data->offset < 128) info[idx++] = (struct field_modify_info){4, 0, @@ -2040,7 +2018,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, if (reg == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "unavalable extended metadata register"); + "unavailable extended metadata register"); if (reg == REG_B) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -3195,11 +3173,14 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; const struct rte_flow_action_set_meta *conf; uint32_t nic_mask = UINT32_MAX; int reg; - if (!mlx5_flow_ext_mreg_supported(dev)) + if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + !mlx5_flow_ext_mreg_supported(dev)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action, "extended metadata register" @@ -3210,7 +3191,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, if (reg == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action, - "unavalable extended metadata register"); + "unavailable extended metadata register"); if (reg != REG_A && reg != REG_B) { struct mlx5_priv *priv = dev->data->dev_private; @@ -4833,10 +4814,12 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, struct mlx5_dev_config *config = &priv->config; const struct rte_flow_action_modify_field *action_modify_field = action->conf; - uint32_t dst_width = mlx5_flow_item_field_width(priv, - action_modify_field->dst.field, -1); - uint32_t src_width = mlx5_flow_item_field_width(priv, - 
action_modify_field->src.field, dst_width); + uint32_t dst_width = mlx5_flow_item_field_width(dev, + action_modify_field->dst.field, + -1, attr, error); + uint32_t src_width = mlx5_flow_item_field_width(dev, + action_modify_field->src.field, + dst_width, attr, error); ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (ret) @@ -4929,15 +4912,27 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, "modifications of the GENEVE Network" " Identifier is not supported"); if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK || - action_modify_field->src.field == RTE_FLOW_FIELD_MARK || - action_modify_field->dst.field == RTE_FLOW_FIELD_META || - action_modify_field->src.field == RTE_FLOW_FIELD_META) { + action_modify_field->src.field == RTE_FLOW_FIELD_MARK) if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || !mlx5_flow_ext_mreg_supported(dev)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action, - "cannot modify mark or metadata without" - " extended metadata register support"); + "cannot modify mark in legacy mode" + " or without extensive registers"); + if (action_modify_field->dst.field == RTE_FLOW_FIELD_META || + action_modify_field->src.field == RTE_FLOW_FIELD_META) { + if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + !mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "cannot modify meta without" + " extensive registers support"); + ret = flow_dv_get_metadata_reg(dev, attr, error); + if (ret < 0 || ret == REG_NON) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "cannot modify meta without" + " extensive registers available"); } if (action_modify_field->operation != RTE_FLOW_MODIFY_SET) return rte_flow_error_set(error, ENOTSUP, @@ -4973,7 +4968,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, const struct rte_flow_attr *attributes, bool external, struct rte_flow_error *error) { - uint32_t target_group, table; + uint32_t target_group, table = 0; int ret = 0; struct flow_grp_info grp_info = { .external = !!external, @@ -5004,6 +4999,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "target group must be other than" " the current flow group"); + if (table == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "root table shouldn't be destination"); return 0; } @@ -5124,6 +5123,8 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, * Pointer to rte_eth_dev structure. * @param[in] action_flags * Bit-fields that holds the actions detected until now. + * @param[in] item_flags + * Holds the items detected. * @param[in] action * Pointer to the meter action. * @param[in] attr @@ -5134,11 +5135,11 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, - uint64_t action_flags, + uint64_t action_flags, uint64_t item_flags, const struct rte_flow_action *action, const struct rte_flow_attr *attr, const struct rte_flow_item *port_id_item, @@ -5242,6 +5243,35 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, NULL, "Flow and meter policy " "have different src port."); + } else if (mtr_policy->is_rss) { + struct mlx5_flow_meter_policy *fp; + struct mlx5_meter_policy_action_container *acg; + struct mlx5_meter_policy_action_container *acy; + const struct rte_flow_action *rss_act; + int ret; + + fp = mlx5_flow_meter_hierarchy_get_final_policy(dev, + mtr_policy); + if (fp == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Unable to get the final " + "policy in the hierarchy"); + acg = &fp->act_cnt[RTE_COLOR_GREEN]; + acy = &fp->act_cnt[RTE_COLOR_YELLOW]; + MLX5_ASSERT(acg->fate_action == + MLX5_FLOW_FATE_SHARED_RSS || + acy->fate_action == + MLX5_FLOW_FATE_SHARED_RSS); + if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS) + rss_act = acg->rss; + else + rss_act = acy->rss; + ret = mlx5_flow_validate_action_rss(rss_act, + action_flags, dev, attr, + item_flags, error); + if (ret) + return ret; } *def_policy = false; } @@ -5577,6 +5607,10 @@ flow_dv_validate_action_sample(uint64_t *action_flags, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, jump should " "be after sample action"); + if (*action_flags & MLX5_FLOW_ACTION_CT) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Sample after CT not supported"); act = sample->actions; for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) { if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) @@ -6401,14 +6435,17 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free) return NULL; } pool->devx_obj = dcs; + rte_rwlock_write_lock(&pools_mng->resize_mtrwl); pool->index = pools_mng->n_valid; if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) { mlx5_free(pool); claim_zero(mlx5_devx_cmd_destroy(dcs)); + rte_rwlock_write_unlock(&pools_mng->resize_mtrwl); return NULL; } pools_mng->pools[pool->index] = pool; pools_mng->n_valid++; + rte_rwlock_write_unlock(&pools_mng->resize_mtrwl); for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) { pool->mtrs[i].offset = i; LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next); @@ -6581,119 +6618,85 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, return ret; } -static uint16_t -mlx5_flow_locate_proto_l3(const struct rte_flow_item **head, - const struct rte_flow_item *end) +static int +validate_integrity_bits(const struct rte_flow_item_integrity *mask, + int64_t pattern_flags, uint64_t l3_flags, + uint64_t l4_flags, uint64_t ip4_flag, + struct rte_flow_error *error) { - const struct rte_flow_item *item = *head; - uint16_t l3_protocol; + if (mask->l3_ok && !(pattern_flags & l3_flags)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "missing L3 protocol"); + + if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "missing IPv4 protocol"); + + if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "missing L4 protocol"); - for (; item != end; item++) { - switch (item->type) { - default: - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - l3_protocol = RTE_ETHER_TYPE_IPV4; - goto l3_ok; - 
case RTE_FLOW_ITEM_TYPE_IPV6: - l3_protocol = RTE_ETHER_TYPE_IPV6; - goto l3_ok; - case RTE_FLOW_ITEM_TYPE_ETH: - if (item->mask && item->spec) { - MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth, - type, item, - l3_protocol); - if (l3_protocol == RTE_ETHER_TYPE_IPV4 || - l3_protocol == RTE_ETHER_TYPE_IPV6) - goto l3_ok; - } - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - if (item->mask && item->spec) { - MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan, - inner_type, item, - l3_protocol); - if (l3_protocol == RTE_ETHER_TYPE_IPV4 || - l3_protocol == RTE_ETHER_TYPE_IPV6) - goto l3_ok; - } - break; - } - } return 0; -l3_ok: - *head = item; - return l3_protocol; } -static uint8_t -mlx5_flow_locate_proto_l4(const struct rte_flow_item **head, - const struct rte_flow_item *end) +static int +flow_dv_validate_item_integrity_post(const struct + rte_flow_item *integrity_items[2], + int64_t pattern_flags, + struct rte_flow_error *error) { - const struct rte_flow_item *item = *head; - uint8_t l4_protocol; + const struct rte_flow_item_integrity *mask; + int ret; - for (; item != end; item++) { - switch (item->type) { - default: - break; - case RTE_FLOW_ITEM_TYPE_TCP: - l4_protocol = IPPROTO_TCP; - goto l4_ok; - case RTE_FLOW_ITEM_TYPE_UDP: - l4_protocol = IPPROTO_UDP; - goto l4_ok; - case RTE_FLOW_ITEM_TYPE_IPV4: - if (item->mask && item->spec) { - const struct rte_flow_item_ipv4 *mask, *spec; - - mask = (typeof(mask))item->mask; - spec = (typeof(spec))item->spec; - l4_protocol = mask->hdr.next_proto_id & - spec->hdr.next_proto_id; - if (l4_protocol == IPPROTO_TCP || - l4_protocol == IPPROTO_UDP) - goto l4_ok; - } - break; - case RTE_FLOW_ITEM_TYPE_IPV6: - if (item->mask && item->spec) { - const struct rte_flow_item_ipv6 *mask, *spec; - mask = (typeof(mask))item->mask; - spec = (typeof(spec))item->spec; - l4_protocol = mask->hdr.proto & spec->hdr.proto; - if (l4_protocol == IPPROTO_TCP || - l4_protocol == IPPROTO_UDP) - goto l4_ok; - } - break; - } + if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) { + mask = (typeof(mask))integrity_items[0]->mask; + ret = validate_integrity_bits(mask, pattern_flags, + MLX5_FLOW_LAYER_OUTER_L3, + MLX5_FLOW_LAYER_OUTER_L4, + MLX5_FLOW_LAYER_OUTER_L3_IPV4, + error); + if (ret) + return ret; + } + if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) { + mask = (typeof(mask))integrity_items[1]->mask; + ret = validate_integrity_bits(mask, pattern_flags, + MLX5_FLOW_LAYER_INNER_L3, + MLX5_FLOW_LAYER_INNER_L4, + MLX5_FLOW_LAYER_INNER_L3_IPV4, + error); + if (ret) + return ret; } return 0; -l4_ok: - *head = item; - return l4_protocol; } static int flow_dv_validate_item_integrity(struct rte_eth_dev *dev, - const struct rte_flow_item *rule_items, const struct rte_flow_item *integrity_item, + uint64_t pattern_flags, uint64_t *last_item, + const struct rte_flow_item *integrity_items[2], struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items; const struct rte_flow_item_integrity *mask = (typeof(mask)) integrity_item->mask; const struct rte_flow_item_integrity *spec = (typeof(spec)) integrity_item->spec; - uint32_t protocol; if (!priv->config.hca_attr.pkt_integrity_match) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, integrity_item, "packet integrity integrity_item not supported"); + if (!spec) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + integrity_item, + "no spec for integrity item"); if (!mask) mask = &rte_flow_item_integrity_mask; if 
(!mlx5_validate_integrity_item(mask)) @@ -6701,35 +6704,105 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ITEM, integrity_item, "unsupported integrity filter"); - tunnel_item = mlx5_flow_find_tunnel_item(rule_items); if (spec->level > 1) { - if (!tunnel_item) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - integrity_item, - "missing tunnel item"); - item = tunnel_item; - end_item = mlx5_find_end_item(tunnel_item); + if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "multiple inner integrity items not supported"); + integrity_items[1] = integrity_item; + *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY; } else { - end_item = tunnel_item ? tunnel_item : - mlx5_find_end_item(integrity_item); - } - if (mask->l3_ok || mask->ipv4_csum_ok) { - protocol = mlx5_flow_locate_proto_l3(&item, end_item); - if (!protocol) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - integrity_item, - "missing L3 protocol"); + if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "multiple outer integrity items not supported"); + integrity_items[0] = integrity_item; + *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY; } - if (mask->l4_ok || mask->l4_csum_ok) { - protocol = mlx5_flow_locate_proto_l4(&item, end_item); - if (!protocol) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - integrity_item, - "missing L4 protocol"); + return 0; +} + +static int +flow_dv_validate_item_flex(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t *last_item, + bool is_inner, + struct rte_flow_error *error) +{ + const struct rte_flow_item_flex *flow_spec = item->spec; + const struct rte_flow_item_flex *flow_mask = item->mask; + struct mlx5_flex_item *flex; + + if (!flow_spec) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "flex flow item spec cannot be NULL"); + if (!flow_mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "flex flow item mask cannot be NULL"); + if (item->last) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "flex flow item last not supported"); + if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "invalid flex flow item handle"); + flex = (struct mlx5_flex_item *)flow_spec->handle; + switch (flex->tunnel_mode) { + case FLEX_TUNNEL_MODE_SINGLE: + if (item_flags & + (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX)) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "multiple flex items not supported"); + break; + case FLEX_TUNNEL_MODE_OUTER: + if (is_inner) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "inner flex item was not configured"); + if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX) + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "multiple flex items not supported"); + break; + case FLEX_TUNNEL_MODE_INNER: + if (!is_inner) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "outer flex item was not configured"); + if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "multiple flex items not supported"); + break; + case FLEX_TUNNEL_MODE_MULTI: + if ((is_inner && 
(item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) || + (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "multiple flex items not supported"); + } + break; + case FLEX_TUNNEL_MODE_TUNNEL: + if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL)) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "multiple flex tunnel items not supported"); + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid flex item configuration"); } + *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ? + MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ? + MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX; return 0; } @@ -6825,7 +6898,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, .std_tbl_fix = true, }; const struct rte_eth_hairpin_conf *conf; - const struct rte_flow_item *rule_items = items; + const struct rte_flow_item *integrity_items[2] = {NULL, NULL}; const struct rte_flow_item *port_id_item = NULL; bool def_policy = false; uint16_t udp_dport = 0; @@ -6835,6 +6908,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, tunnel = is_tunnel_offload_active(dev) ? mlx5_get_tof(items, actions, &tof_rule_type) : NULL; if (tunnel) { + if (!priv->config.dv_flow_en) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "tunnel offload requires DV flow interface"); if (priv->representor) return rte_flow_error_set (error, ENOTSUP, @@ -7152,16 +7230,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, last_item = MLX5_FLOW_LAYER_ECPRI; break; case RTE_FLOW_ITEM_TYPE_INTEGRITY: - if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) - return rte_flow_error_set - (error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - NULL, "multiple integrity items not supported"); - ret = flow_dv_validate_item_integrity(dev, rule_items, - items, error); + ret = flow_dv_validate_item_integrity(dev, items, + item_flags, + &last_item, + integrity_items, + error); if (ret < 0) return ret; - last_item = MLX5_FLOW_ITEM_INTEGRITY; break; case RTE_FLOW_ITEM_TYPE_CONNTRACK: ret = flow_dv_validate_item_aso_ct(dev, items, @@ -7174,6 +7249,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * list it here as a supported type */ break; + case RTE_FLOW_ITEM_TYPE_FLEX: + ret = flow_dv_validate_item_flex(dev, items, item_flags, + &last_item, + tunnel != 0, error); + if (ret < 0) + return ret; + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, @@ -7181,6 +7263,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } item_flags |= last_item; } + if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) { + ret = flow_dv_validate_item_integrity_post(integrity_items, + item_flags, error); + if (ret) + return ret; + } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { int type = actions->type; bool shared_count = false; @@ -7617,6 +7705,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_METER: ret = mlx5_flow_validate_action_meter(dev, action_flags, + item_flags, actions, attr, port_id_item, &def_policy, @@ -7759,7 +7848,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * - Explicit decap action is prohibited by the tunnel offload API. * - Drop action in tunnel steer rule is prohibited by the API. 
* - Application cannot use MARK action because it's value can mask - * tunnel default miss nitification. + * tunnel default miss notification. * - JUMP in tunnel match rule has no support in current PMD * implementation. * - TAG & META are reserved for future uses. @@ -8663,18 +8752,19 @@ flow_dv_translate_item_gre_key(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] inner - * Item is inner pattern. + * @param[in] pattern_flags + * Accumulated pattern flags. */ static void flow_dv_translate_item_gre(void *matcher, void *key, const struct rte_flow_item *item, - int inner) + uint64_t pattern_flags) { + static const struct rte_flow_item_gre empty_gre = {0,}; const struct rte_flow_item_gre *gre_m = item->mask; const struct rte_flow_item_gre *gre_v = item->spec; - void *headers_m; - void *headers_v; + void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); struct { @@ -8691,26 +8781,17 @@ flow_dv_translate_item_gre(void *matcher, void *key, uint16_t value; }; } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; + uint16_t protocol_m, protocol_v; - if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); - } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); - } MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); - if (!gre_v) - return; - if (!gre_m) - gre_m = &rte_flow_item_gre_mask; - MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, - rte_be_to_cpu_16(gre_m->protocol)); - MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, - rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol)); + if (!gre_v) { + gre_v = &empty_gre; + gre_m = &empty_gre; + } else { + if (!gre_m) + gre_m = &rte_flow_item_gre_mask; + } gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); MLX5_SET(fte_match_set_misc, misc_m, gre_c_present, @@ -8728,6 +8809,17 @@ flow_dv_translate_item_gre(void *matcher, void *key, MLX5_SET(fte_match_set_misc, misc_v, gre_s_present, gre_crks_rsvd0_ver_v.s_present & gre_crks_rsvd0_ver_m.s_present); + protocol_m = rte_be_to_cpu_16(gre_m->protocol); + protocol_v = rte_be_to_cpu_16(gre_v->protocol); + if (!protocol_m) { + /* Force next protocol to prevent matchers duplication */ + protocol_v = mlx5_translate_tunnel_etypes(pattern_flags); + if (protocol_v) + protocol_m = 0xFFFF; + } + MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m); + MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, + protocol_m & protocol_v); } /** @@ -8739,13 +8831,13 @@ flow_dv_translate_item_gre(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] inner - * Item is inner pattern. + * @param[in] pattern_flags + * Accumulated pattern flags. 
*/ static void flow_dv_translate_item_nvgre(void *matcher, void *key, const struct rte_flow_item *item, - int inner) + unsigned long pattern_flags) { const struct rte_flow_item_nvgre *nvgre_m = item->mask; const struct rte_flow_item_nvgre *nvgre_v = item->spec; @@ -8772,7 +8864,7 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, .mask = &gre_mask, .last = NULL, }; - flow_dv_translate_item_gre(matcher, key, &gre_item, inner); + flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags); if (!nvgre_v) return; if (!nvgre_m) @@ -8909,46 +9001,40 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, static void flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, - const struct rte_flow_item *item, int inner) + const struct rte_flow_item *item, + const uint64_t pattern_flags) { + static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, }; const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask; const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec; - void *headers_m; - void *headers_v; + /* The item was validated to be on the outer side */ + void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3); void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); - char *vni_m; - char *vni_v; - uint16_t dport; - int size; - int i; + char *vni_m = + MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); + char *vni_v = + MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); + int i, size = sizeof(vxlan_m->vni); uint8_t flags_m = 0xff; uint8_t flags_v = 0xc; + uint8_t m_protocol, v_protocol; - if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); - } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); - } - dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? - MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + MLX5_UDP_PORT_VXLAN_GPE); + } + if (!vxlan_v) { + vxlan_v = &dummy_vxlan_gpe_hdr; + vxlan_m = &dummy_vxlan_gpe_hdr; + } else { + if (!vxlan_m) + vxlan_m = &rte_flow_item_vxlan_gpe_mask; } - if (!vxlan_v) - return; - if (!vxlan_m) - vxlan_m = &rte_flow_item_vxlan_gpe_mask; - size = sizeof(vxlan_m->vni); - vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); - vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); memcpy(vni_m, vxlan_m->vni, size); for (i = 0; i < size; ++i) vni_v[i] = vni_m[i] & vxlan_v->vni[i]; @@ -8958,10 +9044,23 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, } MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m); MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v); - MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol, - vxlan_m->protocol); - MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol, - vxlan_v->protocol); + m_protocol = vxlan_m->protocol; + v_protocol = vxlan_v->protocol; + if (!m_protocol) { + /* Force next protocol to ensure next headers parsing. 
*/ + if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2) + v_protocol = RTE_VXLAN_GPE_TYPE_ETH; + else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) + v_protocol = RTE_VXLAN_GPE_TYPE_IPV4; + else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6) + v_protocol = RTE_VXLAN_GPE_TYPE_IPV6; + if (v_protocol) + m_protocol = 0xFF; + } + MLX5_SET(fte_match_set_misc3, misc_m, + outer_vxlan_gpe_next_protocol, m_protocol); + MLX5_SET(fte_match_set_misc3, misc_v, + outer_vxlan_gpe_next_protocol, m_protocol & v_protocol); } /** @@ -8979,49 +9078,39 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, static void flow_dv_translate_item_geneve(void *matcher, void *key, - const struct rte_flow_item *item, int inner) + const struct rte_flow_item *item, + uint64_t pattern_flags) { + static const struct rte_flow_item_geneve empty_geneve = {0,}; const struct rte_flow_item_geneve *geneve_m = item->mask; const struct rte_flow_item_geneve *geneve_v = item->spec; - void *headers_m; - void *headers_v; + /* GENEVE flow item validation allows single tunnel item */ + void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); - uint16_t dport; uint16_t gbhdr_m; uint16_t gbhdr_v; - char *vni_m; - char *vni_v; - size_t size, i; + char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); + char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); + size_t size = sizeof(geneve_m->vni), i; + uint16_t protocol_m, protocol_v; - if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); - } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); - } - dport = MLX5_UDP_PORT_GENEVE; if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + MLX5_UDP_PORT_GENEVE); + } + if (!geneve_v) { + geneve_v = &empty_geneve; + geneve_m = &empty_geneve; + } else { + if (!geneve_m) + geneve_m = &rte_flow_item_geneve_mask; } - if (!geneve_v) - return; - if (!geneve_m) - geneve_m = &rte_flow_item_geneve_mask; - size = sizeof(geneve_m->vni); - vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); - vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); memcpy(vni_m, geneve_m->vni, size); for (i = 0; i < size; ++i) vni_v[i] = vni_m[i] & geneve_v->vni[i]; - MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, - rte_be_to_cpu_16(geneve_m->protocol)); - MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, - rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol)); gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0); gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0); MLX5_SET(fte_match_set_misc, misc_m, geneve_oam, @@ -9033,6 +9122,17 @@ flow_dv_translate_item_geneve(void *matcher, void *key, MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) & MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); + protocol_m = rte_be_to_cpu_16(geneve_m->protocol); + protocol_v = rte_be_to_cpu_16(geneve_v->protocol); + if (!protocol_m) { + /* Force next protocol to prevent matchers duplication */ 
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags); + if (protocol_v) + protocol_m = 0xFFFF; + } + MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m); + MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, + protocol_m & protocol_v); } /** @@ -9074,7 +9174,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev, geneve_opt_v->option_type && geneve_opt_resource->length == geneve_opt_v->option_len) { - /* We already have GENVE TLV option obj allocated. */ + /* We already have GENEVE TLV option obj allocated. */ __atomic_fetch_add(&geneve_opt_resource->refcnt, 1, __ATOMIC_RELAXED); } else { @@ -9229,16 +9329,22 @@ flow_dv_translate_item_mpls(void *matcher, void *key, switch (prev_layer) { case MLX5_FLOW_LAYER_OUTER_L4_UDP: - MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, - MLX5_UDP_PORT_MPLS); + if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + MLX5_UDP_PORT_MPLS); + } break; case MLX5_FLOW_LAYER_GRE: /* Fall-through. */ case MLX5_FLOW_LAYER_GRE_KEY: - MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff); - MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, - RTE_ETHER_TYPE_MPLS); + if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) { + MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, + 0xffff); + MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, + RTE_ETHER_TYPE_MPLS); + } break; default: break; @@ -9935,7 +10041,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, */ if (!ecpri_m->hdr.common.u32) return; - samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids; + samples = priv->sh->ecpri_parser.ids; /* Need to take the whole DW as the mask to fill the entry. */ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m, prog_sample_field_value_0); @@ -10038,6 +10144,27 @@ flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev, reg_value, reg_mask); } +static void +flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key, + const struct rte_flow_item *item, + struct mlx5_flow *dev_flow, bool is_inner) +{ + const struct rte_flow_item_flex *spec = + (const struct rte_flow_item_flex *)item->spec; + int index = mlx5_flex_acquire_index(dev, spec->handle, false); + + MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT)); + if (index < 0) + return; + if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) { + /* Don't count both inner and outer flex items in one rule. */ + if (mlx5_flex_acquire_index(dev, spec->handle, true) != index) + MLX5_ASSERT(false); + dev_flow->handle->flex_item |= RTE_BIT32(index); + } + mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner); +} + static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; #define HEADER_IS_ZERO(match_criteria, headers) \ @@ -10089,7 +10216,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria) * Check flow matching criteria first, subtract misc5/4 length if flow * doesn't own misc5/4 parameters. In some old rdma-core releases, * misc5/4 are not supported, and matcher creation failure is expected - * w/o subtration. If misc5 is provided, misc4 must be counted in since + * w/o subtraction. If misc5 is provided, misc4 must be counted in since * misc5 is right after misc4. 
*/ if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) { @@ -11288,7 +11415,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) goto error; } } - /* create a dest array actioin */ + /* create a dest array action */ ret = mlx5_os_flow_dr_create_flow_action_dest_array (domain, resource->num_of_dest, @@ -11523,7 +11650,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, (((const struct rte_flow_action_mark *) (sub_actions->conf))->id); - dev_flow->handle->mark = 1; + wks->mark = 1; pre_rix = dev_flow->handle->dvh.rix_tag; /* Save the mark resource before sample */ pre_r = dev_flow->dv.tag_resource; @@ -11903,18 +12030,18 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev, } pool->flow_hit_aso_obj = obj; pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; - rte_spinlock_lock(&mng->resize_sl); + rte_rwlock_write_lock(&mng->resize_rwl); pool->index = mng->next; /* Resize pools array if there is no room for the new pool in it. */ if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) { claim_zero(mlx5_devx_cmd_destroy(obj)); mlx5_free(pool); - rte_spinlock_unlock(&mng->resize_sl); + rte_rwlock_write_unlock(&mng->resize_rwl); return NULL; } mng->pools[pool->index] = pool; mng->next++; - rte_spinlock_unlock(&mng->resize_sl); + rte_rwlock_write_unlock(&mng->resize_rwl); /* Assign the first action in the new pool, the rest go to free list. */ *age_free = &pool->actions[0]; for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) { @@ -12032,34 +12159,24 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask, void *headers_m, void *headers_v) { if (mask->l4_ok) { - /* application l4_ok filter aggregates all hardware l4 filters - * therefore hw l4_checksum_ok must be implicitly added here. + /* RTE l4_ok filter aggregates hardware l4_ok and + * l4_checksum_ok filters. + * Positive RTE l4_ok match requires hardware match on both L4 + * hardware integrity bits. + * For negative match, check hardware l4_checksum_ok bit only, + * because hardware sets that bit to 0 for all packets + * with bad L4. */ - struct rte_flow_item_integrity local_item; - - local_item.l4_csum_ok = 1; - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, - local_item.l4_csum_ok); if (value->l4_ok) { - /* application l4_ok = 1 matches sets both hw flags - * l4_ok and l4_checksum_ok flags to 1. - */ - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - l4_checksum_ok, local_item.l4_csum_ok); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, - mask->l4_ok); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, - value->l4_ok); - } else { - /* application l4_ok = 0 matches on hw flag - * l4_checksum_ok = 0 only. 
- */ - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - l4_checksum_ok, 0); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1); } - } else if (mask->l4_csum_ok) { - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, - mask->l4_csum_ok); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok, + !!value->l4_ok); + } + if (mask->l4_csum_ok) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok, value->l4_csum_ok); } @@ -12068,77 +12185,102 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask, static void flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask, const struct rte_flow_item_integrity *value, - void *headers_m, void *headers_v, - bool is_ipv4) + void *headers_m, void *headers_v, bool is_ipv4) { if (mask->l3_ok) { - /* application l3_ok filter aggregates all hardware l3 filters - * therefore hw ipv4_checksum_ok must be implicitly added here. + /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and + * ipv4_csum_ok filters. + * Positive RTE l3_ok match requires hardware match on both L3 + * hardware integrity bits. + * For negative match, check hardware l3_csum_ok bit only, + * because hardware sets that bit to 0 for all packets + * with bad L3. */ - struct rte_flow_item_integrity local_item; - - local_item.ipv4_csum_ok = !!is_ipv4; - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, - local_item.ipv4_csum_ok); - if (value->l3_ok) { + if (is_ipv4) { + if (value->l3_ok) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, + l3_ok, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + l3_ok, 1); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, + ipv4_checksum_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - ipv4_checksum_ok, local_item.ipv4_csum_ok); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, - mask->l3_ok); + ipv4_checksum_ok, !!value->l3_ok); + } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok, value->l3_ok); - } else { - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - ipv4_checksum_ok, 0); } - } else if (mask->ipv4_csum_ok) { - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, - mask->ipv4_csum_ok); + } + if (mask->ipv4_csum_ok) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok, value->ipv4_csum_ok); } } static void -flow_dv_translate_item_integrity(void *matcher, void *key, - const struct rte_flow_item *head_item, - const struct rte_flow_item *integrity_item) +set_integrity_bits(void *headers_m, void *headers_v, + const struct rte_flow_item *integrity_item, bool is_l3_ip4) { + const struct rte_flow_item_integrity *spec = integrity_item->spec; const struct rte_flow_item_integrity *mask = integrity_item->mask; - const struct rte_flow_item_integrity *value = integrity_item->spec; - const struct rte_flow_item *tunnel_item, *end_item, *item; - void *headers_m; - void *headers_v; - uint32_t l3_protocol; - if (!value) - return; + /* Integrity bits validation cleared spec pointer */ + MLX5_ASSERT(spec != NULL); if (!mask) mask = &rte_flow_item_integrity_mask; - if (value->level > 1) { + flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v, + is_l3_ip4); + flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v); +} + +static void 
+flow_dv_translate_item_integrity_post(void *matcher, void *key, + const + struct rte_flow_item *integrity_items[2], + uint64_t pattern_flags) +{ + void *headers_m, *headers_v; + bool is_l3_ip4; + + if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) { headers_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); - } else { + is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) != + 0; + set_integrity_bits(headers_m, headers_v, + integrity_items[1], is_l3_ip4); + } + if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) { headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) != + 0; + set_integrity_bits(headers_m, headers_v, + integrity_items[0], is_l3_ip4); } - tunnel_item = mlx5_flow_find_tunnel_item(head_item); - if (value->level > 1) { - /* tunnel item was verified during the item validation */ - item = tunnel_item; - end_item = mlx5_find_end_item(tunnel_item); +} + +static void +flow_dv_translate_item_integrity(const struct rte_flow_item *item, + const struct rte_flow_item *integrity_items[2], + uint64_t *last_item) +{ + const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec; + + /* integrity bits validation cleared spec pointer */ + MLX5_ASSERT(spec != NULL); + if (spec->level > 1) { + integrity_items[1] = item; + *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY; } else { - item = head_item; - end_item = tunnel_item ? tunnel_item : - mlx5_find_end_item(integrity_item); + integrity_items[0] = item; + *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY; } - l3_protocol = mask->l3_ok ? - mlx5_flow_locate_proto_l3(&item, end_item) : 0; - flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v, - l3_protocol == RTE_ETHER_TYPE_IPV4); - flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v); } /** @@ -12554,7 +12696,8 @@ flow_dv_translate(struct rte_eth_dev *dev, (1 << MLX5_SCALE_FLOW_GROUP_BIT), .std_tbl_fix = true, }; - const struct rte_flow_item *head_item = items; + const struct rte_flow_item *integrity_items[2] = {NULL, NULL}; + const struct rte_flow_item *tunnel_item = NULL; if (!wks) return rte_flow_error_set(error, ENOMEM, @@ -12667,7 +12810,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_FLAG: action_flags |= MLX5_FLOW_ACTION_FLAG; - dev_flow->handle->mark = 1; + wks->mark = 1; if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { struct rte_flow_action_mark mark = { .id = MLX5_FLOW_MARK_DEFAULT, @@ -12696,7 +12839,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; - dev_flow->handle->mark = 1; + wks->mark = 1; if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { const struct rte_flow_action_mark *mark = (const struct rte_flow_action_mark *) @@ -13324,10 +13467,9 @@ flow_dv_translate(struct rte_eth_dev *dev, MLX5_FLOW_LAYER_OUTER_L4_UDP; break; case RTE_FLOW_ITEM_TYPE_GRE: - flow_dv_translate_item_gre(match_mask, match_value, - items, tunnel); matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_GRE_KEY: flow_dv_translate_item_gre_key(match_mask, @@ -13335,10 +13477,9 @@ flow_dv_translate(struct rte_eth_dev *dev, last_item = MLX5_FLOW_LAYER_GRE_KEY; break; case RTE_FLOW_ITEM_TYPE_NVGRE: - flow_dv_translate_item_nvgre(match_mask, match_value, - items, tunnel); 
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_VXLAN: flow_dv_translate_item_vxlan(dev, attr, @@ -13348,17 +13489,14 @@ flow_dv_translate(struct rte_eth_dev *dev, last_item = MLX5_FLOW_LAYER_VXLAN; break; case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: - flow_dv_translate_item_vxlan_gpe(match_mask, - match_value, items, - tunnel); matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_GENEVE: - flow_dv_translate_item_geneve(match_mask, match_value, - items, tunnel); matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GENEVE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: ret = flow_dv_translate_item_geneve_opt(dev, match_mask, @@ -13447,14 +13585,20 @@ flow_dv_translate(struct rte_eth_dev *dev, last_item = MLX5_FLOW_LAYER_ECPRI; break; case RTE_FLOW_ITEM_TYPE_INTEGRITY: - flow_dv_translate_item_integrity(match_mask, - match_value, - head_item, items); + flow_dv_translate_item_integrity(items, integrity_items, + &last_item); break; case RTE_FLOW_ITEM_TYPE_CONNTRACK: flow_dv_translate_item_aso_ct(dev, match_mask, match_value, items); break; + case RTE_FLOW_ITEM_TYPE_FLEX: + flow_dv_translate_item_flex(dev, match_mask, + match_value, items, + dev_flow, tunnel != 0); + last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX : + MLX5_FLOW_ITEM_OUTER_FLEX; + break; default: break; } @@ -13473,6 +13617,27 @@ flow_dv_translate(struct rte_eth_dev *dev, match_value, NULL, attr)) return -rte_errno; } + if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) { + flow_dv_translate_item_integrity_post(match_mask, match_value, + integrity_items, + item_flags); + } + if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE) + flow_dv_translate_item_vxlan_gpe(match_mask, match_value, + tunnel_item, item_flags); + else if (item_flags & MLX5_FLOW_LAYER_GENEVE) + flow_dv_translate_item_geneve(match_mask, match_value, + tunnel_item, item_flags); + else if (item_flags & MLX5_FLOW_LAYER_GRE) { + if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE) + flow_dv_translate_item_gre(match_mask, match_value, + tunnel_item, item_flags); + else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE) + flow_dv_translate_item_nvgre(match_mask, match_value, + tunnel_item, item_flags); + else + MLX5_ASSERT(false); + } #ifdef RTE_LIBRTE_MLX5_DEBUG MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, dev_flow->dv.value.buf)); @@ -14329,6 +14494,12 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) if (!dev_handle) return; flow->dev_handles = dev_handle->next.next; + while (dev_handle->flex_item) { + int index = rte_bsf32(dev_handle->flex_item); + + mlx5_flex_release_index(dev, index); + dev_handle->flex_item &= ~RTE_BIT32(index); + } if (dev_handle->dvh.matcher) flow_dv_matcher_release(dev, dev_handle); if (dev_handle->dvh.rix_sample) @@ -14522,7 +14693,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, size_t i; int err; - if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) { + if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl, + !!dev->data->dev_started)) { return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot setup indirection table"); @@ -14562,7 +14734,7 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, error_hrxq_new: err = rte_errno; __flow_dv_action_rss_hrxqs_release(dev, shared_rss); - if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true)) + if 
(!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true)) shared_rss->ind_tbl = NULL; rte_errno = err; return -rte_errno; @@ -14705,7 +14877,8 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, NULL, "shared rss hrxq has references"); queue = shared_rss->ind_tbl->queues; - remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true); + remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, + !!dev->data->dev_started); if (remaining) return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_ACTION, @@ -14893,6 +15066,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, void *queue = NULL; uint16_t *queue_old = NULL; uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t); + bool dev_started = !!dev->data->dev_started; if (!shared_rss) return rte_flow_error_set(error, EINVAL, @@ -14915,7 +15089,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, rte_spinlock_lock(&shared_rss->action_rss_sl); queue_old = shared_rss->ind_tbl->queues; ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl, - queue, action_conf->queue_num, true); + queue, action_conf->queue_num, + true /* standalone */, + dev_started /* ref_new_qs */, + dev_started /* deref_old_qs */); if (ret) { mlx5_free(queue); ret = rte_flow_error_set(error, rte_errno, @@ -15230,7 +15407,9 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, (MLX5_MAX_MODIFY_NUM + 1)]; } mhdr_dummy; struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0; memset(&dh, 0, sizeof(struct mlx5_flow_handle)); @@ -15268,7 +15447,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, NULL, "cannot create policy " "mark action for this color"); - dev_flow.handle->mark = 1; + wks->mark = 1; if (flow_dv_tag_resource_register(dev, tag_be, &dev_flow, &flow_err)) return -rte_mtr_error_set(error, @@ -15648,7 +15827,7 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev, * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int +int flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data, struct rte_flow_error *error) { @@ -15686,6 +15865,48 @@ flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data, "counters are not available"); } + +/** + * Query counter's action pointer for a DV flow rule via DevX. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] cnt_idx + * Index to the flow counter. + * @param[out] action_ptr + * Action pointer for counter. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx, + void **action_ptr, struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!priv->sh->devx || !action_ptr) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not supported"); + + if (cnt_idx) { + struct mlx5_flow_counter *cnt = NULL; + cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL); + if (cnt) { + *action_ptr = cnt->action; + return 0; + } + } + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not available"); +} + static int flow_dv_action_query(struct rte_eth_dev *dev, const struct rte_flow_action_handle *handle, void *data, @@ -16651,7 +16872,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, struct mlx5_meter_policy_action_container *act_cnt; uint32_t domain = MLX5_MTR_DOMAIN_INGRESS; uint16_t sub_policy_num; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); rte_spinlock_lock(&mtr_policy->sl); for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { if (!rss_desc[i]) @@ -16725,7 +16948,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, if (act_cnt->rix_mark || act_cnt->modify_hdr) { memset(&dh, 0, sizeof(struct mlx5_flow_handle)); if (act_cnt->rix_mark) - dh.mark = 1; + wks->mark = 1; dh.fate_action = MLX5_FLOW_FATE_QUEUE; dh.rix_hrxq = hrxq_idx[i]; flow_drv_rxq_flags_set(dev, &dh); @@ -17437,12 +17660,22 @@ static inline int flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1, const struct rte_flow_action_rss *r2) { - if (!r1 || !r2) + if (r1 == NULL || r2 == NULL) return 0; - if (r1->func != r2->func || r1->level != r2->level || - r1->types != r2->types || r1->key_len != r2->key_len || - memcmp(r1->key, r2->key, r1->key_len)) + if (!(r1->level <= 1 && r2->level <= 1) && + !(r1->level > 1 && r2->level > 1)) + return 1; + if (r1->types != r2->types && + !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) && + (r2->types == 0 || r2->types == RTE_ETH_RSS_IP))) return 1; + if (r1->key || r2->key) { + const void *key1 = r1->key ? r1->key : rss_hash_default_key; + const void *key2 = r2->key ? r2->key : rss_hash_default_key; + + if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN)) + return 1; + } return 0; } @@ -17594,6 +17827,8 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, *policy_mode = MLX5_MTR_POLICY_MODE_OG; } else if (def_green && !def_yellow) { *policy_mode = MLX5_MTR_POLICY_MODE_OY; + } else { + *policy_mode = MLX5_MTR_POLICY_MODE_ALL; } /* Set to empty string in case of NULL pointer access by user. */ flow_err.message = ""; @@ -17909,6 +18144,108 @@ flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags) return 0; } +/** + * Discover the number of available flow priorities + * by trying to create a flow with the highest priority value + * for each possible number. + * + * @param[in] dev + * Ethernet device. + * @param[in] vprio + * List of possible number of available priorities. + * @param[in] vprio_n + * Size of @p vprio array. + * @return + * On success, number of available flow priorities. + * On failure, a negative errno-style code and rte_errno is set. 
*/ +static int +flow_dv_discover_priorities(struct rte_eth_dev *dev, + const uint16_t *vprio, int vprio_n) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW]; + struct rte_flow_item_eth eth; + struct rte_flow_item item = { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &eth, + .mask = &eth, + }; + struct mlx5_flow_dv_matcher matcher = { + .mask = { + .size = sizeof(matcher.mask.buf), + }, + }; + union mlx5_flow_tbl_key tbl_key; + struct mlx5_flow flow; + void *action; + struct rte_flow_error error; + uint8_t misc_mask; + int i, err, ret = -ENOTSUP; + + /* + * Prepare a flow with a catch-all pattern and a drop action. + * Use drop queue, because shared drop action may be unavailable. + */ + action = priv->drop_queue.hrxq->action; + if (action == NULL) { + DRV_LOG(ERR, "Priority discovery requires a drop action"); + rte_errno = ENOTSUP; + return -rte_errno; + } + memset(&flow, 0, sizeof(flow)); + flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx); + if (flow.handle == NULL) { + DRV_LOG(ERR, "Cannot create flow handle"); + rte_errno = ENOMEM; + return -rte_errno; + } + flow.ingress = true; + flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); + flow.dv.actions[0] = action; + flow.dv.actions_n = 1; + memset(&eth, 0, sizeof(eth)); + flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf, + &item, /* inner */ false, /* group */ 0); + matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size); + for (i = 0; i < vprio_n; i++) { + /* Configure the next proposed maximum priority. */ + matcher.priority = vprio[i] - 1; + memset(&tbl_key, 0, sizeof(tbl_key)); + err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow, + /* tunnel */ NULL, + /* group */ 0, + &error); + if (err != 0) { + /* This action is pure SW and must always succeed. */ + DRV_LOG(ERR, "Cannot register matcher"); + ret = -rte_errno; + break; + } + /* Try to apply the flow to HW. */ + misc_mask = flow_dv_matcher_enable(flow.dv.value.buf); + __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask); + err = mlx5_flow_os_create_flow + (flow.handle->dvh.matcher->matcher_object, + (void *)&flow.dv.value, flow.dv.actions_n, + flow.dv.actions, &flow.handle->drv_flow); + if (err == 0) { + claim_zero(mlx5_flow_os_destroy_flow + (flow.handle->drv_flow)); + flow.handle->drv_flow = NULL; + } + claim_zero(flow_dv_matcher_release(dev, flow.handle)); + if (err != 0) + break; + ret = vprio[i]; + } + mlx5_ipool_free(pool, flow.handle_idx); + /* Set rte_errno if no expected priority value matched. */ + if (ret < 0) + rte_errno = -ret; + return ret; +} + const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { .validate = flow_dv_validate, .prepare = flow_dv_prepare, @@ -17942,6 +18279,9 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { .action_update = flow_dv_action_update, .action_query = flow_dv_action_query, .sync_domain = flow_dv_sync_domain, + .discover_priorities = flow_dv_discover_priorities, + .item_create = flow_dv_item_create, + .item_release = flow_dv_item_release, }; #endif /* HAVE_IBV_FLOW_DV_SUPPORT */