X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=af90a7fd0ae07b4ad5d5e4027a65c5eee6920c8c;hb=8e83ba285abe4341b7666927d3fc265b35446c06;hp=7b32c06fc66c89270d06230478162c6c9236df53;hpb=1ce19ab1f43b9895fe6c3d5000578cb227f6bcec;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7b32c06fc6..af90a7fd0a 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -93,20 +93,6 @@ static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
 				  uint32_t rix_jump);
 
-static inline uint16_t
-mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
-{
-	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
-		return RTE_ETHER_TYPE_TEB;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
-		return RTE_ETHER_TYPE_IPV4;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
-		return RTE_ETHER_TYPE_IPV6;
-	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
-		return RTE_ETHER_TYPE_MPLS;
-	return 0;
-}
-
 static int16_t
 flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
 {
@@ -1501,9 +1487,9 @@ mlx5_flow_field_id_to_modify_info
 						(32 - width)) << off);
 		} else {
 			if (data->offset < 16)
-				info[idx++] = (struct field_modify_info){2, 4,
+				info[idx++] = (struct field_modify_info){2, 0,
 						MLX5_MODI_OUT_DMAC_15_0};
-			info[idx] = (struct field_modify_info){4, 0,
+			info[idx] = (struct field_modify_info){4, off,
 					MLX5_MODI_OUT_DMAC_47_16};
 		}
 		break;
@@ -1531,9 +1517,9 @@ mlx5_flow_field_id_to_modify_info
 						(32 - width)) << off);
 		} else {
 			if (data->offset < 16)
-				info[idx++] = (struct field_modify_info){2, 4,
+				info[idx++] = (struct field_modify_info){2, 0,
 						MLX5_MODI_OUT_SMAC_15_0};
-			info[idx] = (struct field_modify_info){4, 0,
+			info[idx] = (struct field_modify_info){4, off,
 					MLX5_MODI_OUT_SMAC_47_16};
 		}
 		break;
@@ -1645,13 +1631,13 @@ mlx5_flow_field_id_to_modify_info
 			mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
 		} else {
 			if (data->offset < 32)
-				info[idx++] = (struct field_modify_info){4, 12,
+				info[idx++] = (struct field_modify_info){4, 0,
 						MLX5_MODI_OUT_SIPV6_31_0};
 			if (data->offset < 64)
-				info[idx++] = (struct field_modify_info){4, 8,
+				info[idx++] = (struct field_modify_info){4, 0,
 						MLX5_MODI_OUT_SIPV6_63_32};
 			if (data->offset < 96)
-				info[idx++] = (struct field_modify_info){4, 4,
+				info[idx++] = (struct field_modify_info){4, 0,
 						MLX5_MODI_OUT_SIPV6_95_64};
 			if (data->offset < 128)
 				info[idx++] = (struct field_modify_info){4, 0,
@@ -1713,13 +1699,13 @@ mlx5_flow_field_id_to_modify_info
 			mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
 		} else {
 			if (data->offset < 32)
-				info[idx++] = (struct field_modify_info){4, 12,
+				info[idx++] = (struct field_modify_info){4, 0,
 						MLX5_MODI_OUT_DIPV6_31_0};
 			if (data->offset < 64)
-				info[idx++] = (struct field_modify_info){4, 8,
+				info[idx++] = (struct field_modify_info){4, 0,
 						MLX5_MODI_OUT_DIPV6_63_32};
 			if (data->offset < 96)
-				info[idx++] = (struct field_modify_info){4, 4,
+				info[idx++] = (struct field_modify_info){4, 0,
 						MLX5_MODI_OUT_DIPV6_95_64};
 			if (data->offset < 128)
 				info[idx++] = (struct field_modify_info){4, 0,
@@ -2032,7 +2018,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
 		if (reg == REG_NON)
 			return rte_flow_error_set(error, ENOTSUP,
 					RTE_FLOW_ERROR_TYPE_ITEM, item,
-					"unavalable extended metadata register");
+					"unavailable extended metadata register");
 		if (reg == REG_B)
 			return rte_flow_error_set(error, ENOTSUP,
 					RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -3205,7 +3191,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
 	if (reg == REG_NON)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
-					  "unavalable extended metadata register");
+					  "unavailable extended metadata register");
 	if (reg != REG_A && reg != REG_B) {
 		struct mlx5_priv *priv = dev->data->dev_private;
 
@@ -4982,7 +4968,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
 			     const struct rte_flow_attr *attributes,
 			     bool external, struct rte_flow_error *error)
 {
-	uint32_t target_group, table;
+	uint32_t target_group, table = 0;
 	int ret = 0;
 	struct flow_grp_info grp_info = {
 		.external = !!external,
@@ -5013,6 +4999,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "target group must be other than"
 					  " the current flow group");
+	if (table == 0)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "root table shouldn't be destination");
 	return 0;
 }
 
@@ -5145,7 +5135,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
@@ -5254,13 +5244,21 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
 						"Flow and meter policy "
 						"have different src port.");
 		} else if (mtr_policy->is_rss) {
-			struct mlx5_meter_policy_action_container *acg =
-					&mtr_policy->act_cnt[RTE_COLOR_GREEN];
-			struct mlx5_meter_policy_action_container *acy =
-					&mtr_policy->act_cnt[RTE_COLOR_YELLOW];
+			struct mlx5_flow_meter_policy *fp;
+			struct mlx5_meter_policy_action_container *acg;
+			struct mlx5_meter_policy_action_container *acy;
 			const struct rte_flow_action *rss_act;
 			int ret;
 
+			fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+								mtr_policy);
+			if (fp == NULL)
+				return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"Unable to get the final "
+					"policy in the hierarchy");
+			acg = &fp->act_cnt[RTE_COLOR_GREEN];
+			acy = &fp->act_cnt[RTE_COLOR_YELLOW];
 			MLX5_ASSERT(acg->fate_action ==
 				    MLX5_FLOW_FATE_SHARED_RSS ||
 				    acy->fate_action ==
@@ -7850,7 +7848,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	 *  - Explicit decap action is prohibited by the tunnel offload API.
 	 *  - Drop action in tunnel steer rule is prohibited by the API.
 	 *  - Application cannot use MARK action because it's value can mask
-	 *    tunnel default miss nitification.
+	 *    tunnel default miss notification.
 	 *  - JUMP in tunnel match rule has no support in current PMD
 	 *    implementation.
 	 *  - TAG & META are reserved for future uses.
@@ -8815,8 +8813,9 @@ flow_dv_translate_item_gre(void *matcher, void *key,
 	protocol_v = rte_be_to_cpu_16(gre_v->protocol);
 	if (!protocol_m) {
 		/* Force next protocol to prevent matchers duplication */
-		protocol_m = 0xFFFF;
 		protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+		if (protocol_v)
+			protocol_m = 0xFFFF;
 	}
 	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
 	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
@@ -9048,7 +9047,6 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 	m_protocol = vxlan_m->protocol;
 	v_protocol = vxlan_v->protocol;
 	if (!m_protocol) {
-		m_protocol = 0xff;
 		/* Force next protocol to ensure next headers parsing. */
 		if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
 			v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
@@ -9056,6 +9054,8 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 			v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
 		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
 			v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+		if (v_protocol)
+			m_protocol = 0xFF;
 	}
 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
 		 m_protocol);
@@ -9126,8 +9126,9 @@ flow_dv_translate_item_geneve(void *matcher, void *key,
 	protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
 	if (!protocol_m) {
 		/* Force next protocol to prevent matchers duplication */
-		protocol_m = 0xFFFF;
 		protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+		if (protocol_v)
+			protocol_m = 0xFFFF;
 	}
 	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
 	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
@@ -9173,7 +9174,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
 			geneve_opt_v->option_type &&
 			geneve_opt_resource->length ==
 			geneve_opt_v->option_len) {
-			/* We already have GENVE TLV option obj allocated. */
+			/* We already have GENEVE TLV option obj allocated. */
 			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
 					   __ATOMIC_RELAXED);
 		} else {
@@ -9328,16 +9329,22 @@ flow_dv_translate_item_mpls(void *matcher, void *key,
 
 	switch (prev_layer) {
 	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
-			 MLX5_UDP_PORT_MPLS);
+		if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+			MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+				 0xffff);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+				 MLX5_UDP_PORT_MPLS);
+		}
 		break;
 	case MLX5_FLOW_LAYER_GRE:
 		/* Fall-through. */
 	case MLX5_FLOW_LAYER_GRE_KEY:
-		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
-		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
-			 RTE_ETHER_TYPE_MPLS);
+		if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+			MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+				 0xffff);
+			MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+				 RTE_ETHER_TYPE_MPLS);
+		}
 		break;
 	default:
 		break;
@@ -10209,7 +10216,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
 	 * Check flow matching criteria first, subtract misc5/4 length if flow
 	 * doesn't own misc5/4 parameters. In some old rdma-core releases,
 	 * misc5/4 are not supported, and matcher creation failure is expected
-	 * w/o subtration. If misc5 is provided, misc4 must be counted in since
+	 * w/o subtraction. If misc5 is provided, misc4 must be counted in since
 	 * misc5 is right after misc4.
 	 */
 	if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
@@ -11408,7 +11415,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
 			goto error;
 		}
 	}
-	/* create a dest array actioin */
+	/* create a dest array action */
 	ret = mlx5_os_flow_dr_create_flow_action_dest_array
 						(domain,
 						 resource->num_of_dest,
@@ -11643,7 +11650,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 						(((const struct rte_flow_action_mark *)
 						(sub_actions->conf))->id);
 
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			pre_rix = dev_flow->handle->dvh.rix_tag;
 			/* Save the mark resource before sample */
 			pre_r = dev_flow->dv.tag_resource;
@@ -12803,7 +12810,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 				struct rte_flow_action_mark mark = {
 					.id = MLX5_FLOW_MARK_DEFAULT,
@@ -12832,7 +12839,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 				const struct rte_flow_action_mark *mark =
 					(const struct rte_flow_action_mark *)
@@ -14686,7 +14693,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
 	size_t i;
 	int err;
 
-	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
+				     !!dev->data->dev_started)) {
 		return rte_flow_error_set(error, rte_errno,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot setup indirection table");
@@ -14726,7 +14734,7 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
 error_hrxq_new:
 	err = rte_errno;
 	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
-	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
 		shared_rss->ind_tbl = NULL;
 	rte_errno = err;
 	return -rte_errno;
@@ -14869,7 +14877,8 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
 					  NULL,
 					  "shared rss hrxq has references");
 	queue = shared_rss->ind_tbl->queues;
-	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
+					       !!dev->data->dev_started);
 	if (remaining)
 		return rte_flow_error_set(error, EBUSY,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -15057,6 +15066,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
 	void *queue = NULL;
 	uint16_t *queue_old = NULL;
 	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+	bool dev_started = !!dev->data->dev_started;
 
 	if (!shared_rss)
 		return rte_flow_error_set(error, EINVAL,
@@ -15079,7 +15089,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
 	rte_spinlock_lock(&shared_rss->action_rss_sl);
 	queue_old = shared_rss->ind_tbl->queues;
 	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
-					queue, action_conf->queue_num, true);
+					queue, action_conf->queue_num,
+					true /* standalone */,
+					dev_started /* ref_new_qs */,
+					dev_started /* deref_old_qs */);
 	if (ret) {
 		mlx5_free(queue);
 		ret = rte_flow_error_set(error, rte_errno,
@@ -15394,7 +15407,9 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 			(MLX5_MAX_MODIFY_NUM + 1)];
 	} mhdr_dummy;
 	struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
+	MLX5_ASSERT(wks);
 	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
 	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
 	memset(&dh, 0, sizeof(struct mlx5_flow_handle));
@@ -15432,7 +15447,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 						NULL,
 						"cannot create policy "
 						"mark action for this color");
-				dev_flow.handle->mark = 1;
+				wks->mark = 1;
 				if (flow_dv_tag_resource_register(dev, tag_be,
 						  &dev_flow, &flow_err))
 					return -rte_mtr_error_set(error,
@@ -16857,7 +16872,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 	struct mlx5_meter_policy_action_container *act_cnt;
 	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
 	uint16_t sub_policy_num;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
+	MLX5_ASSERT(wks);
 	rte_spinlock_lock(&mtr_policy->sl);
 	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
 		if (!rss_desc[i])
@@ -16931,7 +16948,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 		if (act_cnt->rix_mark || act_cnt->modify_hdr) {
 			memset(&dh, 0, sizeof(struct mlx5_flow_handle));
 			if (act_cnt->rix_mark)
-				dh.mark = 1;
+				wks->mark = 1;
 			dh.fate_action = MLX5_FLOW_FATE_QUEUE;
 			dh.rix_hrxq = hrxq_idx[i];
 			flow_drv_rxq_flags_set(dev, &dh);
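
Note, appended after the patch body (illustrative, not part of the applied diff): the GRE, GENEVE, and VXLAN-GPE hunks above share one pattern. The next-protocol mask is now forced only when a next protocol can actually be deduced from the pattern flags, so a flow with no inner-layer items keeps matching any next protocol instead of matching protocol 0. The standalone sketch below reproduces that pattern; the MLX5_FLOW_LAYER_* bit positions here are illustrative stand-ins rather than the real mlx5 definitions, the helper mirrors the mlx5_translate_tunnel_etypes() body visible in the first (removed) hunk, and the EtherType constants are the standard values.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the mlx5 pattern-flag bits. */
#define MLX5_FLOW_LAYER_INNER_L2      (1ULL << 0)
#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1ULL << 1)
#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1ULL << 2)
#define MLX5_FLOW_LAYER_MPLS          (1ULL << 3)

/* Standard EtherType values. */
#define RTE_ETHER_TYPE_TEB  0x6558
#define RTE_ETHER_TYPE_IPV4 0x0800
#define RTE_ETHER_TYPE_IPV6 0x86DD
#define RTE_ETHER_TYPE_MPLS 0x8847

/* Same logic as the helper removed from mlx5_flow_dv.c above. */
static uint16_t
translate_tunnel_etypes(uint64_t pattern_flags)
{
	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
		return RTE_ETHER_TYPE_TEB;
	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
		return RTE_ETHER_TYPE_IPV4;
	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
		return RTE_ETHER_TYPE_IPV6;
	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
		return RTE_ETHER_TYPE_MPLS;
	return 0;
}

int
main(void)
{
	/* Case 0: no inner layers in the pattern; case 1: inner IPv4. */
	uint64_t cases[2] = { 0, MLX5_FLOW_LAYER_INNER_L3_IPV4 };
	int i;

	for (i = 0; i < 2; i++) {
		uint16_t protocol_m = 0; /* no explicit protocol match */
		uint16_t protocol_v = 0;

		if (!protocol_m) {
			/* Patched behavior: force the mask only when the
			 * next protocol can be deduced; otherwise leave
			 * mask and value zero (match any next protocol).
			 */
			protocol_v = translate_tunnel_etypes(cases[i]);
			if (protocol_v)
				protocol_m = 0xFFFF;
		}
		printf("case %d: mask=0x%04x value=0x%04x\n",
		       i, (unsigned)protocol_m, (unsigned)protocol_v);
	}
	return 0;
}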