X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=af90a7fd0ae07b4ad5d5e4027a65c5eee6920c8c;hb=8e83ba285abe4341b7666927d3fc265b35446c06;hp=8fa7829341f464c6ec55cefa5c05b0b2887c980b;hpb=aaa6a7ec0f3e19388f1262b7af298e6668d401cf;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 8fa7829341..af90a7fd0a 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -1470,11 +1470,11 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){2, 4, MLX5_MODI_OUT_DMAC_15_0}; if (width < 16) { - mask[idx] = rte_cpu_to_be_16(0xffff >> + mask[1] = rte_cpu_to_be_16(0xffff >> (16 - width)); width = 0; } else { - mask[idx] = RTE_BE16(0xffff); + mask[1] = RTE_BE16(0xffff); width -= 16; } if (!width) @@ -1483,13 +1483,13 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DMAC_47_16}; - mask[idx] = rte_cpu_to_be_32((0xffffffff >> - (32 - width)) << off); + mask[0] = rte_cpu_to_be_32((0xffffffff >> + (32 - width)) << off); } else { if (data->offset < 16) - info[idx++] = (struct field_modify_info){2, 4, + info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_DMAC_15_0}; - info[idx] = (struct field_modify_info){4, 0, + info[idx] = (struct field_modify_info){4, off, MLX5_MODI_OUT_DMAC_47_16}; } break; @@ -1500,11 +1500,11 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){2, 4, MLX5_MODI_OUT_SMAC_15_0}; if (width < 16) { - mask[idx] = rte_cpu_to_be_16(0xffff >> + mask[1] = rte_cpu_to_be_16(0xffff >> (16 - width)); width = 0; } else { - mask[idx] = RTE_BE16(0xffff); + mask[1] = RTE_BE16(0xffff); width -= 16; } if (!width) @@ -1513,13 +1513,13 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SMAC_47_16}; - mask[idx] = rte_cpu_to_be_32((0xffffffff >> - (32 - width)) << off); + mask[0] = rte_cpu_to_be_32((0xffffffff >> + (32 - width)) << off); } else { if (data->offset < 16) - info[idx++] = (struct field_modify_info){2, 4, + info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_SMAC_15_0}; - info[idx] = (struct field_modify_info){4, 0, + info[idx] = (struct field_modify_info){4, off, MLX5_MODI_OUT_SMAC_47_16}; } break; @@ -1582,12 +1582,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 12, MLX5_MODI_OUT_SIPV6_31_0}; if (width < 32) { - mask[idx] = + mask[3] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[3] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1598,12 +1598,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 8, MLX5_MODI_OUT_SIPV6_63_32}; if (width < 32) { - mask[idx] = + mask[2] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[2] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1614,12 +1614,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 4, MLX5_MODI_OUT_SIPV6_95_64}; if (width < 32) { - mask[idx] = + mask[1] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[1] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1628,17 +1628,16 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SIPV6_127_96}; - mask[idx] = rte_cpu_to_be_32(0xffffffff >> - (32 - width)); + mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - 
width)); } else { if (data->offset < 32) - info[idx++] = (struct field_modify_info){4, 12, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SIPV6_31_0}; if (data->offset < 64) - info[idx++] = (struct field_modify_info){4, 8, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SIPV6_63_32}; if (data->offset < 96) - info[idx++] = (struct field_modify_info){4, 4, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_SIPV6_95_64}; if (data->offset < 128) info[idx++] = (struct field_modify_info){4, 0, @@ -1651,12 +1650,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 12, MLX5_MODI_OUT_DIPV6_31_0}; if (width < 32) { - mask[idx] = + mask[3] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[3] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1667,12 +1666,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 8, MLX5_MODI_OUT_DIPV6_63_32}; if (width < 32) { - mask[idx] = + mask[2] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[2] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1683,12 +1682,12 @@ mlx5_flow_field_id_to_modify_info info[idx] = (struct field_modify_info){4, 4, MLX5_MODI_OUT_DIPV6_95_64}; if (width < 32) { - mask[idx] = + mask[1] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); + mask[1] = RTE_BE32(0xffffffff); width -= 32; } if (!width) @@ -1697,17 +1696,16 @@ mlx5_flow_field_id_to_modify_info } info[idx] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_127_96}; - mask[idx] = rte_cpu_to_be_32(0xffffffff >> - (32 - width)); + mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width)); } else { if (data->offset < 32) - info[idx++] = (struct field_modify_info){4, 12, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_31_0}; if (data->offset < 64) - info[idx++] = (struct field_modify_info){4, 8, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_63_32}; if (data->offset < 96) - info[idx++] = (struct field_modify_info){4, 4, + info[idx++] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DIPV6_95_64}; if (data->offset < 128) info[idx++] = (struct field_modify_info){4, 0, @@ -2020,7 +2018,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, if (reg == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "unavalable extended metadata register"); + "unavailable extended metadata register"); if (reg == REG_B) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -3193,7 +3191,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, if (reg == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action, - "unavalable extended metadata register"); + "unavailable extended metadata register"); if (reg != REG_A && reg != REG_B) { struct mlx5_priv *priv = dev->data->dev_private; @@ -4970,7 +4968,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, const struct rte_flow_attr *attributes, bool external, struct rte_flow_error *error) { - uint32_t target_group, table; + uint32_t target_group, table = 0; int ret = 0; struct flow_grp_info grp_info = { .external = !!external, @@ -5001,6 +4999,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "target group must be other than" " the current flow group"); + if (table == 0) + return 
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "root table shouldn't be destination"); return 0; } @@ -5121,6 +5123,8 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, * Pointer to rte_eth_dev structure. * @param[in] action_flags * Bit-fields that holds the actions detected until now. + * @param[in] item_flags + * Holds the items detected. * @param[in] action * Pointer to the meter action. * @param[in] attr @@ -5131,11 +5135,11 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, - uint64_t action_flags, + uint64_t action_flags, uint64_t item_flags, const struct rte_flow_action *action, const struct rte_flow_attr *attr, const struct rte_flow_item *port_id_item, @@ -5239,6 +5243,35 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, NULL, "Flow and meter policy " "have different src port."); + } else if (mtr_policy->is_rss) { + struct mlx5_flow_meter_policy *fp; + struct mlx5_meter_policy_action_container *acg; + struct mlx5_meter_policy_action_container *acy; + const struct rte_flow_action *rss_act; + int ret; + + fp = mlx5_flow_meter_hierarchy_get_final_policy(dev, + mtr_policy); + if (fp == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Unable to get the final " + "policy in the hierarchy"); + acg = &fp->act_cnt[RTE_COLOR_GREEN]; + acy = &fp->act_cnt[RTE_COLOR_YELLOW]; + MLX5_ASSERT(acg->fate_action == + MLX5_FLOW_FATE_SHARED_RSS || + acy->fate_action == + MLX5_FLOW_FATE_SHARED_RSS); + if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS) + rss_act = acg->rss; + else + rss_act = acy->rss; + ret = mlx5_flow_validate_action_rss(rss_act, + action_flags, dev, attr, + item_flags, error); + if (ret) + return ret; } *def_policy = false; } @@ -5574,6 +5607,10 @@ flow_dv_validate_action_sample(uint64_t *action_flags, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, jump should " "be after sample action"); + if (*action_flags & MLX5_FLOW_ACTION_CT) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Sample after CT not supported"); act = sample->actions; for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) { if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) @@ -7668,6 +7705,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_METER: ret = mlx5_flow_validate_action_meter(dev, action_flags, + item_flags, actions, attr, port_id_item, &def_policy, @@ -7810,7 +7848,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * - Explicit decap action is prohibited by the tunnel offload API. * - Drop action in tunnel steer rule is prohibited by the API. * - Application cannot use MARK action because it's value can mask - * tunnel default miss nitification. + * tunnel default miss notification. * - JUMP in tunnel match rule has no support in current PMD * implementation. * - TAG & META are reserved for future uses. @@ -8714,18 +8752,19 @@ flow_dv_translate_item_gre_key(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] inner - * Item is inner pattern. + * @param[in] pattern_flags + * Accumulated pattern flags. 
*/ static void flow_dv_translate_item_gre(void *matcher, void *key, const struct rte_flow_item *item, - int inner) + uint64_t pattern_flags) { + static const struct rte_flow_item_gre empty_gre = {0,}; const struct rte_flow_item_gre *gre_m = item->mask; const struct rte_flow_item_gre *gre_v = item->spec; - void *headers_m; - void *headers_v; + void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); struct { @@ -8742,26 +8781,17 @@ flow_dv_translate_item_gre(void *matcher, void *key, uint16_t value; }; } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; + uint16_t protocol_m, protocol_v; - if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); - } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); - } MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); - if (!gre_v) - return; - if (!gre_m) - gre_m = &rte_flow_item_gre_mask; - MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, - rte_be_to_cpu_16(gre_m->protocol)); - MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, - rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol)); + if (!gre_v) { + gre_v = &empty_gre; + gre_m = &empty_gre; + } else { + if (!gre_m) + gre_m = &rte_flow_item_gre_mask; + } gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); MLX5_SET(fte_match_set_misc, misc_m, gre_c_present, @@ -8779,6 +8809,17 @@ flow_dv_translate_item_gre(void *matcher, void *key, MLX5_SET(fte_match_set_misc, misc_v, gre_s_present, gre_crks_rsvd0_ver_v.s_present & gre_crks_rsvd0_ver_m.s_present); + protocol_m = rte_be_to_cpu_16(gre_m->protocol); + protocol_v = rte_be_to_cpu_16(gre_v->protocol); + if (!protocol_m) { + /* Force next protocol to prevent matchers duplication */ + protocol_v = mlx5_translate_tunnel_etypes(pattern_flags); + if (protocol_v) + protocol_m = 0xFFFF; + } + MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m); + MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, + protocol_m & protocol_v); } /** @@ -8790,13 +8831,13 @@ flow_dv_translate_item_gre(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. - * @param[in] inner - * Item is inner pattern. + * @param[in] pattern_flags + * Accumulated pattern flags. 
*/ static void flow_dv_translate_item_nvgre(void *matcher, void *key, const struct rte_flow_item *item, - int inner) + unsigned long pattern_flags) { const struct rte_flow_item_nvgre *nvgre_m = item->mask; const struct rte_flow_item_nvgre *nvgre_v = item->spec; @@ -8823,7 +8864,7 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, .mask = &gre_mask, .last = NULL, }; - flow_dv_translate_item_gre(matcher, key, &gre_item, inner); + flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags); if (!nvgre_v) return; if (!nvgre_m) @@ -8960,46 +9001,40 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, static void flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, - const struct rte_flow_item *item, int inner) + const struct rte_flow_item *item, + const uint64_t pattern_flags) { + static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, }; const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask; const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec; - void *headers_m; - void *headers_v; + /* The item was validated to be on the outer side */ + void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3); void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); - char *vni_m; - char *vni_v; - uint16_t dport; - int size; - int i; + char *vni_m = + MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); + char *vni_v = + MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); + int i, size = sizeof(vxlan_m->vni); uint8_t flags_m = 0xff; uint8_t flags_v = 0xc; + uint8_t m_protocol, v_protocol; - if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); - } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); - } - dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? - MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + MLX5_UDP_PORT_VXLAN_GPE); + } + if (!vxlan_v) { + vxlan_v = &dummy_vxlan_gpe_hdr; + vxlan_m = &dummy_vxlan_gpe_hdr; + } else { + if (!vxlan_m) + vxlan_m = &rte_flow_item_vxlan_gpe_mask; } - if (!vxlan_v) - return; - if (!vxlan_m) - vxlan_m = &rte_flow_item_vxlan_gpe_mask; - size = sizeof(vxlan_m->vni); - vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); - vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); memcpy(vni_m, vxlan_m->vni, size); for (i = 0; i < size; ++i) vni_v[i] = vni_m[i] & vxlan_v->vni[i]; @@ -9009,10 +9044,23 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, } MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m); MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v); - MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol, - vxlan_m->protocol); - MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol, - vxlan_v->protocol); + m_protocol = vxlan_m->protocol; + v_protocol = vxlan_v->protocol; + if (!m_protocol) { + /* Force next protocol to ensure next headers parsing. 
*/ + if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2) + v_protocol = RTE_VXLAN_GPE_TYPE_ETH; + else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) + v_protocol = RTE_VXLAN_GPE_TYPE_IPV4; + else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6) + v_protocol = RTE_VXLAN_GPE_TYPE_IPV6; + if (v_protocol) + m_protocol = 0xFF; + } + MLX5_SET(fte_match_set_misc3, misc_m, + outer_vxlan_gpe_next_protocol, m_protocol); + MLX5_SET(fte_match_set_misc3, misc_v, + outer_vxlan_gpe_next_protocol, m_protocol & v_protocol); } /** @@ -9030,49 +9078,39 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, static void flow_dv_translate_item_geneve(void *matcher, void *key, - const struct rte_flow_item *item, int inner) + const struct rte_flow_item *item, + uint64_t pattern_flags) { + static const struct rte_flow_item_geneve empty_geneve = {0,}; const struct rte_flow_item_geneve *geneve_m = item->mask; const struct rte_flow_item_geneve *geneve_v = item->spec; - void *headers_m; - void *headers_v; + /* GENEVE flow item validation allows single tunnel item */ + void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); - uint16_t dport; uint16_t gbhdr_m; uint16_t gbhdr_v; - char *vni_m; - char *vni_v; - size_t size, i; + char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); + char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); + size_t size = sizeof(geneve_m->vni), i; + uint16_t protocol_m, protocol_v; - if (inner) { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); - } else { - headers_m = MLX5_ADDR_OF(fte_match_param, matcher, - outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); - } - dport = MLX5_UDP_PORT_GENEVE; if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + MLX5_UDP_PORT_GENEVE); + } + if (!geneve_v) { + geneve_v = &empty_geneve; + geneve_m = &empty_geneve; + } else { + if (!geneve_m) + geneve_m = &rte_flow_item_geneve_mask; } - if (!geneve_v) - return; - if (!geneve_m) - geneve_m = &rte_flow_item_geneve_mask; - size = sizeof(geneve_m->vni); - vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); - vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); memcpy(vni_m, geneve_m->vni, size); for (i = 0; i < size; ++i) vni_v[i] = vni_m[i] & geneve_v->vni[i]; - MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, - rte_be_to_cpu_16(geneve_m->protocol)); - MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, - rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol)); gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0); gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0); MLX5_SET(fte_match_set_misc, misc_m, geneve_oam, @@ -9084,6 +9122,17 @@ flow_dv_translate_item_geneve(void *matcher, void *key, MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) & MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); + protocol_m = rte_be_to_cpu_16(geneve_m->protocol); + protocol_v = rte_be_to_cpu_16(geneve_v->protocol); + if (!protocol_m) { + /* Force next protocol to prevent matchers duplication */ 
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags); + if (protocol_v) + protocol_m = 0xFFFF; + } + MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m); + MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, + protocol_m & protocol_v); } /** @@ -9125,7 +9174,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev, geneve_opt_v->option_type && geneve_opt_resource->length == geneve_opt_v->option_len) { - /* We already have GENVE TLV option obj allocated. */ + /* We already have GENEVE TLV option obj allocated. */ __atomic_fetch_add(&geneve_opt_resource->refcnt, 1, __ATOMIC_RELAXED); } else { @@ -9280,16 +9329,22 @@ flow_dv_translate_item_mpls(void *matcher, void *key, switch (prev_layer) { case MLX5_FLOW_LAYER_OUTER_L4_UDP: - MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, - MLX5_UDP_PORT_MPLS); + if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + MLX5_UDP_PORT_MPLS); + } break; case MLX5_FLOW_LAYER_GRE: /* Fall-through. */ case MLX5_FLOW_LAYER_GRE_KEY: - MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff); - MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, - RTE_ETHER_TYPE_MPLS); + if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) { + MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, + 0xffff); + MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, + RTE_ETHER_TYPE_MPLS); + } break; default: break; @@ -10161,7 +10216,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria) * Check flow matching criteria first, subtract misc5/4 length if flow * doesn't own misc5/4 parameters. In some old rdma-core releases, * misc5/4 are not supported, and matcher creation failure is expected - * w/o subtration. If misc5 is provided, misc4 must be counted in since + * w/o subtraction. If misc5 is provided, misc4 must be counted in since * misc5 is right after misc4. */ if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) { @@ -11360,7 +11415,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) goto error; } } - /* create a dest array actioin */ + /* create a dest array action */ ret = mlx5_os_flow_dr_create_flow_action_dest_array (domain, resource->num_of_dest, @@ -11595,7 +11650,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, (((const struct rte_flow_action_mark *) (sub_actions->conf))->id); - dev_flow->handle->mark = 1; + wks->mark = 1; pre_rix = dev_flow->handle->dvh.rix_tag; /* Save the mark resource before sample */ pre_r = dev_flow->dv.tag_resource; @@ -12104,34 +12159,24 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask, void *headers_m, void *headers_v) { if (mask->l4_ok) { - /* application l4_ok filter aggregates all hardware l4 filters - * therefore hw l4_checksum_ok must be implicitly added here. + /* RTE l4_ok filter aggregates hardware l4_ok and + * l4_checksum_ok filters. + * Positive RTE l4_ok match requires hardware match on both L4 + * hardware integrity bits. + * For negative match, check hardware l4_checksum_ok bit only, + * because hardware sets that bit to 0 for all packets + * with bad L4. 
*/ - struct rte_flow_item_integrity local_item; - - local_item.l4_csum_ok = 1; - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, - local_item.l4_csum_ok); if (value->l4_ok) { - /* application l4_ok = 1 matches sets both hw flags - * l4_ok and l4_checksum_ok flags to 1. - */ - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - l4_checksum_ok, local_item.l4_csum_ok); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, - mask->l4_ok); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, - value->l4_ok); - } else { - /* application l4_ok = 0 matches on hw flag - * l4_checksum_ok = 0 only. - */ - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - l4_checksum_ok, 0); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1); } - } else if (mask->l4_csum_ok) { - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, - mask->l4_csum_ok); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok, + !!value->l4_ok); + } + if (mask->l4_csum_ok) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok, value->l4_csum_ok); } @@ -12143,28 +12188,33 @@ flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask, void *headers_m, void *headers_v, bool is_ipv4) { if (mask->l3_ok) { - /* application l3_ok filter aggregates all hardware l3 filters - * therefore hw ipv4_checksum_ok must be implicitly added here. + /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and + * ipv4_csum_ok filters. + * Positive RTE l3_ok match requires hardware match on both L3 + * hardware integrity bits. + * For negative match, check hardware l3_csum_ok bit only, + * because hardware sets that bit to 0 for all packets + * with bad L3. 
*/ - struct rte_flow_item_integrity local_item; - - local_item.ipv4_csum_ok = !!is_ipv4; - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, - local_item.ipv4_csum_ok); - if (value->l3_ok) { + if (is_ipv4) { + if (value->l3_ok) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, + l3_ok, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + l3_ok, 1); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, + ipv4_checksum_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - ipv4_checksum_ok, local_item.ipv4_csum_ok); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, - mask->l3_ok); + ipv4_checksum_ok, !!value->l3_ok); + } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok, value->l3_ok); - } else { - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - ipv4_checksum_ok, 0); } - } else if (mask->ipv4_csum_ok) { - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, - mask->ipv4_csum_ok); + } + if (mask->ipv4_csum_ok) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok, value->ipv4_csum_ok); } @@ -12647,6 +12697,7 @@ flow_dv_translate(struct rte_eth_dev *dev, .std_tbl_fix = true, }; const struct rte_flow_item *integrity_items[2] = {NULL, NULL}; + const struct rte_flow_item *tunnel_item = NULL; if (!wks) return rte_flow_error_set(error, ENOMEM, @@ -12759,7 +12810,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_FLAG: action_flags |= MLX5_FLOW_ACTION_FLAG; - dev_flow->handle->mark = 1; + wks->mark = 1; if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { struct rte_flow_action_mark mark = { .id = MLX5_FLOW_MARK_DEFAULT, @@ -12788,7 +12839,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; - dev_flow->handle->mark = 1; + wks->mark = 1; if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { const struct rte_flow_action_mark *mark = (const struct rte_flow_action_mark *) @@ -13416,10 +13467,9 @@ flow_dv_translate(struct rte_eth_dev *dev, MLX5_FLOW_LAYER_OUTER_L4_UDP; break; case RTE_FLOW_ITEM_TYPE_GRE: - flow_dv_translate_item_gre(match_mask, match_value, - items, tunnel); matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_GRE_KEY: flow_dv_translate_item_gre_key(match_mask, @@ -13427,10 +13477,9 @@ flow_dv_translate(struct rte_eth_dev *dev, last_item = MLX5_FLOW_LAYER_GRE_KEY; break; case RTE_FLOW_ITEM_TYPE_NVGRE: - flow_dv_translate_item_nvgre(match_mask, match_value, - items, tunnel); matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_VXLAN: flow_dv_translate_item_vxlan(dev, attr, @@ -13440,17 +13489,14 @@ flow_dv_translate(struct rte_eth_dev *dev, last_item = MLX5_FLOW_LAYER_VXLAN; break; case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: - flow_dv_translate_item_vxlan_gpe(match_mask, - match_value, items, - tunnel); matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_GENEVE: - flow_dv_translate_item_geneve(match_mask, match_value, - items, tunnel); matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GENEVE; + tunnel_item = items; break; case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: ret = flow_dv_translate_item_geneve_opt(dev, match_mask, @@ -13576,6 +13622,22 @@ flow_dv_translate(struct 
rte_eth_dev *dev, integrity_items, item_flags); } + if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE) + flow_dv_translate_item_vxlan_gpe(match_mask, match_value, + tunnel_item, item_flags); + else if (item_flags & MLX5_FLOW_LAYER_GENEVE) + flow_dv_translate_item_geneve(match_mask, match_value, + tunnel_item, item_flags); + else if (item_flags & MLX5_FLOW_LAYER_GRE) { + if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE) + flow_dv_translate_item_gre(match_mask, match_value, + tunnel_item, item_flags); + else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE) + flow_dv_translate_item_nvgre(match_mask, match_value, + tunnel_item, item_flags); + else + MLX5_ASSERT(false); + } #ifdef RTE_LIBRTE_MLX5_DEBUG MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, dev_flow->dv.value.buf)); @@ -14631,7 +14693,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, size_t i; int err; - if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) { + if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl, + !!dev->data->dev_started)) { return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot setup indirection table"); @@ -14671,7 +14734,7 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, error_hrxq_new: err = rte_errno; __flow_dv_action_rss_hrxqs_release(dev, shared_rss); - if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true)) + if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true)) shared_rss->ind_tbl = NULL; rte_errno = err; return -rte_errno; @@ -14814,7 +14877,8 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, NULL, "shared rss hrxq has references"); queue = shared_rss->ind_tbl->queues; - remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true); + remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, + !!dev->data->dev_started); if (remaining) return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_ACTION, @@ -15002,6 +15066,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, void *queue = NULL; uint16_t *queue_old = NULL; uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t); + bool dev_started = !!dev->data->dev_started; if (!shared_rss) return rte_flow_error_set(error, EINVAL, @@ -15024,7 +15089,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, rte_spinlock_lock(&shared_rss->action_rss_sl); queue_old = shared_rss->ind_tbl->queues; ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl, - queue, action_conf->queue_num, true); + queue, action_conf->queue_num, + true /* standalone */, + dev_started /* ref_new_qs */, + dev_started /* deref_old_qs */); if (ret) { mlx5_free(queue); ret = rte_flow_error_set(error, rte_errno, @@ -15339,7 +15407,9 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, (MLX5_MAX_MODIFY_NUM + 1)]; } mhdr_dummy; struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 
1 : 0; memset(&dh, 0, sizeof(struct mlx5_flow_handle)); @@ -15377,7 +15447,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, NULL, "cannot create policy " "mark action for this color"); - dev_flow.handle->mark = 1; + wks->mark = 1; if (flow_dv_tag_resource_register(dev, tag_be, &dev_flow, &flow_err)) return -rte_mtr_error_set(error, @@ -16802,7 +16872,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, struct mlx5_meter_policy_action_container *act_cnt; uint32_t domain = MLX5_MTR_DOMAIN_INGRESS; uint16_t sub_policy_num; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); rte_spinlock_lock(&mtr_policy->sl); for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { if (!rss_desc[i]) @@ -16876,7 +16948,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, if (act_cnt->rix_mark || act_cnt->modify_hdr) { memset(&dh, 0, sizeof(struct mlx5_flow_handle)); if (act_cnt->rix_mark) - dh.mark = 1; + wks->mark = 1; dh.fate_action = MLX5_FLOW_FATE_QUEUE; dh.rix_hrxq = hrxq_idx[i]; flow_drv_rxq_flags_set(dev, &dh);