diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index bb2898f0e5..e4736ee9b5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -35,7 +35,7 @@
 #include "mlx5_rxtx.h"
 #include "rte_pmd_mlx5.h"
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 
 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
@@ -955,7 +955,7 @@ flow_dv_convert_action_set_reg
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
-	MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
+	MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
@@ -1217,6 +1217,7 @@ flow_dv_convert_action_set_meta
	if (reg < 0)
		return reg;
+	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In datapath code there is no endianness
	 * conversions for performance reasons, all
@@ -1438,6 +1439,10 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
	reg = flow_dv_get_metadata_reg(dev, attr, error);
	if (reg < 0)
		return reg;
+	if (reg == REG_NON)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "unavailable extended metadata register");
	if (reg == REG_B)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2446,6 +2451,10 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
	reg = flow_dv_get_metadata_reg(dev, attr, error);
	if (reg < 0)
		return reg;
+	if (reg == REG_NON)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "unavailable extended metadata register");
	if (reg != REG_A && reg != REG_B) {
		struct mlx5_priv *priv = dev->data->dev_private;
@@ -2603,6 +2612,10 @@ flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
+ * @param[in] action
+ *   Pointer to the action structure.
+ * @param[in] item_flags
+ *   Holds the items detected.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
@@ -2614,6 +2627,8 @@ flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
 static int
 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
			      uint64_t action_flags,
+			      const struct rte_flow_action *action,
+			      const uint64_t item_flags,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
 {
@@ -2647,6 +2662,11 @@ flow_dv_validate_action_decap(struct rte_eth_dev *dev,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "decap action for VF representor "
					  "not supported on NIC table");
+	if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
+	    !(item_flags & MLX5_FLOW_LAYER_VXLAN))
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"VXLAN item should be present for VXLAN decap");
	return 0;
 }
 
@@ -2667,6 +2687,10 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
 *   Holds the actions detected until now.
 * @param[out] actions_n
 *   pointer to the number of actions counter.
+ * @param[in] action
+ *   Pointer to the action structure.
+ * @param[in] item_flags
+ *   Holds the items detected.
* @param[out] error * Pointer to error structure. * @@ -2679,7 +2703,8 @@ flow_dv_validate_action_raw_encap_decap const struct rte_flow_action_raw_decap *decap, const struct rte_flow_action_raw_encap *encap, const struct rte_flow_attr *attr, uint64_t *action_flags, - int *actions_n, struct rte_flow_error *error) + int *actions_n, const struct rte_flow_action *action, + uint64_t item_flags, struct rte_flow_error *error) { const struct mlx5_priv *priv = dev->data->dev_private; int ret; @@ -2714,8 +2739,8 @@ flow_dv_validate_action_raw_encap_decap "encap combination"); } if (decap) { - ret = flow_dv_validate_action_decap(dev, *action_flags, attr, - error); + ret = flow_dv_validate_action_decap(dev, *action_flags, action, + item_flags, attr, error); if (ret < 0) return ret; *action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -2771,8 +2796,7 @@ flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused, cache_resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource, entry); - if (resource->entry.key == cache_resource->entry.key && - resource->reformat_type == cache_resource->reformat_type && + if (resource->reformat_type == cache_resource->reformat_type && resource->ft_type == cache_resource->ft_type && resource->flags == cache_resource->flags && resource->size == cache_resource->size && @@ -2865,26 +2889,41 @@ flow_dv_encap_decap_resource_register struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_hlist_entry *entry; - union mlx5_flow_encap_decap_key encap_decap_key = { + union { + struct { + uint32_t ft_type:8; + uint32_t refmt_type:8; + /* + * Header reformat actions can be shared between + * non-root tables. One bit to indicate non-root + * table or not. + */ + uint32_t is_root:1; + uint32_t reserve:15; + }; + uint32_t v32; + } encap_decap_key = { { .ft_type = resource->ft_type, .refmt_type = resource->reformat_type, - .buf_size = resource->size, - .table_level = !!dev_flow->dv.group, - .cksum = 0, + .is_root = !!dev_flow->dv.group, + .reserve = 0, } }; struct mlx5_flow_cb_ctx ctx = { .error = error, .data = resource, }; + uint64_t key64; resource->flags = dev_flow->dv.group ? 0 : 1; - encap_decap_key.cksum = __rte_raw_cksum(resource->buf, - resource->size, 0); - resource->entry.key = encap_decap_key.v64; - entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key, - &ctx); + key64 = __rte_raw_cksum(&encap_decap_key.v32, + sizeof(encap_decap_key.v32), 0); + if (resource->reformat_type != + MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 && + resource->size) + key64 = __rte_raw_cksum(resource->buf, resource->size, key64); + entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx); if (!entry) return -rte_errno; resource = container_of(entry, typeof(*resource), entry); @@ -3935,7 +3974,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, target_group = ((const struct rte_flow_action_jump *)action->conf)->group; ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table, - grp_info, error); + &grp_info, error); if (ret) return ret; if (attributes->group == target_group && @@ -4303,6 +4342,8 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused, * Pointer to the Ethernet device structure. * @param[in] attr * Attributes of flow that includes this action. + * @param[in] item_flags + * Holds the items detected. * @param[out] error * Pointer to error structure. 
* @@ -4314,6 +4355,7 @@ flow_dv_validate_action_sample(uint64_t action_flags, const struct rte_flow_action *action, struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const uint64_t item_flags, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -4407,7 +4449,7 @@ flow_dv_validate_action_sample(uint64_t action_flags, case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: ret = flow_dv_validate_action_raw_encap_decap (dev, NULL, act->conf, attr, &sub_action_flags, - &actions_n, error); + &actions_n, action, item_flags, error); if (ret < 0) return ret; ++actions_n; @@ -4508,6 +4550,7 @@ flow_dv_modify_hdr_resource_register .error = error, .data = resource, }; + uint64_t key64; resource->flags = dev_flow->dv.group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; @@ -4516,8 +4559,8 @@ flow_dv_modify_hdr_resource_register return rte_flow_error_set(error, EOVERFLOW, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many modify header items"); - resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0); - entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx); + key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0); + entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx); if (!entry) return -rte_errno; resource = container_of(entry, typeof(*resource), entry); @@ -5103,7 +5146,7 @@ static int flow_dv_validate_attributes(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, const struct rte_flow_attr *attributes, - struct flow_grp_info grp_info, + const struct flow_grp_info *grp_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -5256,9 +5299,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } else { tunnel = NULL; } + if (tunnel && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "decap not supported " + "for VF representor"); grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate (dev, tunnel, attr, items, actions); - ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error); + ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error); if (ret < 0) return ret; is_root = (uint64_t)ret; @@ -5735,6 +5783,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: ret = flow_dv_validate_action_decap(dev, action_flags, + actions, item_flags, attr, error); if (ret < 0) return ret; @@ -5744,7 +5793,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: ret = flow_dv_validate_action_raw_encap_decap (dev, NULL, actions->conf, attr, &action_flags, - &actions_n, error); + &actions_n, actions, item_flags, error); if (ret < 0) return ret; break; @@ -5762,7 +5811,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, (dev, decap ? 
					 decap : &empty_decap, encap,
					 attr, &action_flags, &actions_n,
-					 error);
+					 actions, item_flags, error);
			if (ret < 0)
				return ret;
			break;
@@ -5929,7 +5978,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
			rw_act_num += MLX5_ACT_NUM_SET_TAG;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
-			if (!attr->group)
+			if (!attr->transfer && !attr->group)
				return rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
@@ -5977,7 +6026,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		case RTE_FLOW_ACTION_TYPE_SAMPLE:
			ret = flow_dv_validate_action_sample(action_flags,
							     actions, dev,
-							     attr, error);
+							     attr, item_flags,
+							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
@@ -6206,8 +6256,9 @@ flow_dv_prepare(struct rte_eth_dev *dev,
			   "not enough memory to create flow handle");
		return NULL;
	}
-	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
+	MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
+	memset(dev_flow, 0, sizeof(*dev_flow));
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/*
@@ -6219,12 +6270,6 @@ flow_dv_prepare(struct rte_eth_dev *dev,
	 */
	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
				  MLX5_ST_SZ_BYTES(fte_match_set_misc4);
-	/*
-	 * The matching value needs to be cleared to 0 before using. In the
-	 * past, it will be automatically cleared when using rte_*alloc
-	 * API. The time consumption will be almost the same as before.
-	 */
-	memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	dev_flow->ingress = attr->ingress;
	dev_flow->dv.transfer = attr->transfer;
	return dev_flow;
@@ -7454,6 +7499,7 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev,
		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return;
+		MLX5_ASSERT(reg != REG_NON);
		/*
		 * In datapath code there is no endianness
		 * conversions for performance reasons, all
@@ -7596,13 +7642,16 @@ flow_dv_translate_item_source_vport(void *matcher, void *key,
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
+ * @param[in] attr
+ *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
 static int
 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
-			       void *key, const struct rte_flow_item *item)
+			       void *key, const struct rte_flow_item *item,
+			       const struct rte_flow_attr *attr)
 {
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
@@ -7614,14 +7663,30 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
-	/* Translate to vport field or to metadata, depending on mode. */
-	if (priv->vport_meta_mask)
-		flow_dv_translate_item_meta_vport(matcher, key,
-						  priv->vport_meta_tag,
-						  priv->vport_meta_mask);
-	else
+	/*
+	 * Translate to vport field or to metadata, depending on mode.
+	 * Kernel can use either misc.source_port or half of C0 metadata
+	 * register.
+	 */
+	if (priv->vport_meta_mask) {
+		/*
+		 * Provide the hint for SW steering library
+		 * to insert the flow into ingress domain and
+		 * save the extra vport match.
+ */ + if (mask == 0xffff && priv->vport_id == 0xffff && + priv->pf_bond < 0 && attr->transfer) + flow_dv_translate_item_source_vport + (matcher, key, priv->vport_id, mask); + else + flow_dv_translate_item_meta_vport + (matcher, key, + priv->vport_meta_tag, + priv->vport_meta_mask); + } else { flow_dv_translate_item_source_vport(matcher, key, priv->vport_id, mask); + } return 0; } @@ -7839,7 +7904,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, prog_sample_field_value_0); /* Already big endian (network order) in the header. */ *(uint32_t *)dw_m = ecpri_m->hdr.common.u32; - *(uint32_t *)dw_v = ecpri_v->hdr.common.u32; + *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32; /* Sample#0, used for matching type, offset 0. */ MLX5_SET(fte_match_set_misc4, misc4_m, prog_sample_field_id_0, samples[0]); @@ -7861,7 +7926,8 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, prog_sample_field_value_1); *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0]; - *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0]; + *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] & + ecpri_m->hdr.dummy[0]; /* Sample#1, to match message body, offset 4. */ MLX5_SET(fte_match_set_misc4, misc4_m, prog_sample_field_id_1, samples[1]); @@ -7942,9 +8008,12 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx) tbl_data->idx = idx; tbl_data->tunnel = tt_prm->tunnel; tbl_data->group_id = tt_prm->group_id; - tbl_data->external = tt_prm->external; + tbl_data->external = !!tt_prm->external; tbl_data->tunnel_offload = is_tunnel_offload_active(dev); tbl_data->is_egress = !!key.direction; + tbl_data->is_transfer = !!key.domain; + tbl_data->dummy = !!key.dummy; + tbl_data->table_id = key.table_id; tbl = &tbl_data->tbl; if (key.dummy) return &tbl_data->entry; @@ -7985,6 +8054,21 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx) return &tbl_data->entry; } +int +flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, uint64_t key64, + void *cb_ctx __rte_unused) +{ + struct mlx5_flow_tbl_data_entry *tbl_data = + container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + union mlx5_flow_tbl_key key = { .v64 = key64 }; + + return tbl_data->table_id != key.table_id || + tbl_data->dummy != key.dummy || + tbl_data->is_transfer != key.domain || + tbl_data->is_egress != key.direction; +} + /** * Get a flow table. * @@ -8042,6 +8126,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, "cannot get table"); return NULL; } + DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.", + table_id, tunnel ? tunnel->tunnel_id : 0, group_id); tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry); return &tbl_data->tbl; } @@ -8068,10 +8154,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list, tbl_data->tunnel->tunnel_id : 0, .group = tbl_data->group_id }; - union mlx5_flow_tbl_key table_key = { - .v64 = entry->key - }; - uint32_t table_id = table_key.table_id; + uint32_t table_id = tbl_data->table_id; tunnel_grp_hash = tbl_data->tunnel ? tbl_data->tunnel->groups : @@ -8080,7 +8163,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list, if (he) mlx5_hlist_unregister(tunnel_grp_hash, he); DRV_LOG(DEBUG, - "Table_id %#x tunnel %u group %u released.", + "Table_id %u tunnel %u group %u released.", table_id, tbl_data->tunnel ? 
tbl_data->tunnel->tunnel_id : 0, @@ -8192,6 +8275,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, struct mlx5_flow_dv_matcher *ref, union mlx5_flow_tbl_key *key, struct mlx5_flow *dev_flow, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, struct rte_flow_error *error) { struct mlx5_cache_entry *entry; @@ -8203,8 +8288,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, .data = ref, }; - tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, - key->domain, false, NULL, 0, 0, error); + /** + * tunnel offload API requires this registration for cases when + * tunnel match rule was inserted before tunnel set rule. + */ + tbl = flow_dv_tbl_resource_get(dev, key->table_id, + key->direction, key->domain, + dev_flow->external, tunnel, + group_id, 0, error); if (!tbl) return -rte_errno; /* No need to refill the error info */ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); @@ -8238,6 +8329,7 @@ flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) return NULL; } entry->idx = idx; + entry->tag_id = key; ret = mlx5_flow_os_create_flow_action_tag(key, &entry->action); if (ret) { @@ -8250,6 +8342,17 @@ flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) return &entry->entry; } +int +flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, uint64_t key, + void *cb_ctx __rte_unused) +{ + struct mlx5_flow_dv_tag_resource *tag = + container_of(entry, struct mlx5_flow_dv_tag_resource, entry); + + return key != tag->tag_id; +} + /** * Find existing tag resource or create and register a new one. * @@ -8680,11 +8783,13 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused, "for sample"); goto error; } + int ret; + cache_resource->normal_path_tbl = tbl; if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { - cache_resource->default_miss = - mlx5_glue->dr_create_flow_action_default_miss(); - if (!cache_resource->default_miss) { + ret = mlx5_flow_os_create_flow_action_default_miss + (&cache_resource->default_miss); + if (!ret) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -8702,20 +8807,20 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused, sampler_attr.sample_actions = (struct mlx5dv_dr_action **) &sample_dv_actions[0]; sampler_attr.action = cache_resource->set_action; - cache_resource->verbs_action = - mlx5_glue->dr_create_flow_action_sampler(&sampler_attr); - if (!cache_resource->verbs_action) { + if (mlx5_os_flow_dr_create_flow_action_sampler + (&sampler_attr, &cache_resource->verbs_action)) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create sample action"); goto error; } cache_resource->idx = idx; + cache_resource->dev = dev; return &cache_resource->entry; error: if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB && cache_resource->default_miss) - claim_zero(mlx5_glue->destroy_flow_action + claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->default_miss)); else flow_dv_sample_sub_actions_release(dev, @@ -8813,6 +8918,7 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused, struct mlx5dv_dr_domain *domain; uint32_t idx = 0, res_idx = 0; struct rte_flow_error *error = ctx->error; + int ret; /* Register new destination array resource. 
*/
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
@@ -8861,11 +8967,12 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
		}
	}
	/* create a dest array action */
-	cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+	ret = mlx5_os_flow_dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
-						 dest_attr);
-	if (!cache_resource->action) {
+						 dest_attr,
+						 &cache_resource->action);
+	if (ret) {
		rte_flow_error_set(error, ENOMEM,
@@ -8873,6 +8980,7 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
		goto error;
	}
	cache_resource->idx = res_idx;
+	cache_resource->dev = dev;
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &cache_resource->entry;
@@ -8983,7 +9091,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
	uint64_t action_flags = 0;
 
	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+	rss_desc = &wks->rss_desc;
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	sample_action = (const struct rte_flow_action_sample *)action->conf;
@@ -9195,7 +9303,7 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
	uint32_t hrxq_idx;
 
	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+	rss_desc = &wks->rss_desc;
	if (num_of_dest > 1) {
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
@@ -9347,8 +9455,10 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
		/* First ASO flow hit allocation - starting ASO data-path. */
		int ret = mlx5_aso_queue_start(priv->sh);
 
-		if (ret)
+		if (ret) {
+			mlx5_free(pools);
			return ret;
+		}
	}
	mng->n = resize;
	mng->pools = pools;
@@ -9418,12 +9528,14 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev,
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to the error structure.
 *
 * @return
 *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
 */
 static uint32_t
-flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
+flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
 {
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct mlx5_aso_age_pool *pool;
@@ -9438,7 +9550,9 @@ flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
		LIST_REMOVE(age_free, next);
	} else if (!flow_dv_age_pool_create(dev, &age_free)) {
		rte_spinlock_unlock(&mng->free_sl);
-		return 0; /* 0 is an error.*/
+		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "failed to create ASO age pool");
+		return 0; /* 0 is an error. */
	}
	rte_spinlock_unlock(&mng->free_sl);
	pool = container_of
@@ -9446,15 +9560,33 @@ flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
		  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
		  actions);
	if (!age_free->dr_action) {
-		age_free->dr_action = mlx5_glue->dr_action_create_flow_hit
-						(pool->flow_hit_aso_obj->obj,
-						 age_free->offset, REG_C_5);
+		int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
+						 error);
+
+		if (reg_c < 0) {
+			rte_flow_error_set(error, rte_errno,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   NULL, "failed to get reg_c "
+					   "for ASO flow hit");
+			return 0; /* 0 is an error.
*/
+		}
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+		age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
+				(priv->sh->rx_domain,
+				 pool->flow_hit_aso_obj->obj, age_free->offset,
+				 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
+				 (reg_c - REG_C_0));
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		if (!age_free->dr_action) {
			rte_errno = errno;
			rte_spinlock_lock(&mng->free_sl);
			LIST_INSERT_HEAD(&mng->free, age_free, next);
			rte_spinlock_unlock(&mng->free_sl);
-			return 0; /* 0 is an error.*/
+			rte_flow_error_set(error, rte_errno,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   NULL, "failed to create ASO "
+					   "flow hit action");
+			return 0; /* 0 is an error. */
		}
	}
	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
@@ -9468,18 +9600,21 @@ flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
 *   Pointer to rte_eth_dev structure.
 * @param[in] age
 *   Pointer to the aging action configuration.
+ * @param[out] error
+ *   Pointer to the error structure.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise.
 */
 static uint32_t
 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
-				 const struct rte_flow_action_age *age)
+				 const struct rte_flow_action_age *age,
+				 struct rte_flow_error *error)
 {
	uint32_t age_idx = 0;
	struct mlx5_aso_age_action *aso_age;
 
-	age_idx = flow_dv_aso_age_alloc(dev);
+	age_idx = flow_dv_aso_age_alloc(dev, error);
	if (!age_idx)
		return 0;
	aso_age = flow_aso_age_get_by_idx(dev, age_idx);
@@ -9573,8 +9708,12 @@ flow_dv_translate(struct rte_eth_dev *dev,
		.skip_scale = !!dev_flow->skip_scale,
	};
 
-	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+	if (!wks)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "failed to push flow workspace");
+	rss_desc = &wks->rss_desc;
	memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
	memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
@@ -9591,7 +9730,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
				(dev, tunnel, attr, items, actions);
	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-				       grp_info, error);
+				       &grp_info, error);
	if (ret)
		return ret;
	dev_flow->dv.group = table;
@@ -9605,10 +9744,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
		/*
		 * do not add decap action if match rule drops packet
		 * HW rejects rules with decap & drop
+		 *
+		 * If a tunnel match rule was inserted before the matching
+		 * tunnel set rule, the flow table used in the match rule
+		 * must be registered. The current implementation handles
+		 * that in flow_dv_matcher_register() at the function end.
		 */
		bool add_decap = true;
		const struct rte_flow_action *ptr = actions;
-		struct mlx5_flow_tbl_resource *tbl;
 
		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9625,20 +9768,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
					dev_flow->dv.encap_decap->action;
			action_flags |= MLX5_FLOW_ACTION_DECAP;
		}
-		/*
-		 * bind table_id with for tunnel match rule.
-		 * Tunnel set rule establishes that bind in JUMP action handler.
-		 * Required for scenario when application creates tunnel match
-		 * rule before tunnel set rule.
-		 */
-		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer,
-					       !!dev_flow->external, tunnel,
-					       attr->group, 0, error);
-		if (!tbl)
-			return rte_flow_error_set
-			       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-			       actions, "cannot register tunnel group");
	}
	for (; !actions_end ; actions++) {
		const struct rte_flow_action_queue *queue;
@@ -9797,7 +9926,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
		case RTE_FLOW_ACTION_TYPE_AGE:
			if (priv->sh->flow_hit_aso_en && attr->group) {
				flow->age = flow_dv_translate_create_aso_age
-						(dev, action->conf);
+						(dev, action->conf, error);
				if (!flow->age)
					return rte_flow_error_set
						(error, rte_errno,
@@ -9938,7 +10067,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
			ret = mlx5_flow_group_to_table(dev, tunnel,
						       jump_group,
						       &table,
-						       grp_info, error);
+						       &grp_info, error);
			if (ret)
				return ret;
			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
@@ -10218,8 +10347,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
						  NULL, "item not supported");
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
-			flow_dv_translate_item_port_id(dev, match_mask,
-						       match_value, items);
+			flow_dv_translate_item_port_id
+				(dev, match_mask, match_value, items, attr);
			last_item = MLX5_FLOW_ITEM_PORT_ID;
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
@@ -10445,7 +10574,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
	    (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, match_mask,
-						   match_value, NULL))
+						   match_value, NULL, attr))
			return -rte_errno;
	}
#ifdef RTE_LIBRTE_MLX5_DEBUG
@@ -10462,13 +10591,15 @@ flow_dv_translate(struct rte_eth_dev *dev,
	/* Register matcher. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
-	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
-						     matcher.priority);
+	matcher.priority = mlx5_os_flow_adjust_priority(dev,
							priority,
							matcher.priority);
	/* The reserved field does not need to be set to 0 here. */
	tbl_key.domain = attr->transfer;
	tbl_key.direction = attr->egress;
	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
		return -rte_errno;
	return 0;
}
@@ -10571,47 +10702,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
	}
}
 
-/**
- * Retrieves hash RX queue suitable for the *flow*.
- * If shared action configured for *flow* suitable hash RX queue will be
- * retrieved from attached shared action.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to the sub flow.
- * @param[in] rss_desc
- *   Pointer to the RSS descriptor.
- * @param[out] hrxq
- *   Pointer to retrieved hash RX queue object.
- *
- * @return
- *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
- */ -static uint32_t -__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, - struct mlx5_flow_rss_desc *rss_desc, - struct mlx5_hrxq **hrxq) -{ - struct mlx5_priv *priv = dev->data->dev_private; - uint32_t hrxq_idx; - - if (rss_desc->shared_rss) { - hrxq_idx = __flow_dv_action_rss_hrxq_lookup - (dev, rss_desc->shared_rss, - dev_flow->hash_fields, - !!(dev_flow->handle->layers & - MLX5_FLOW_LAYER_TUNNEL)); - if (hrxq_idx) - *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], - hrxq_idx); - } else { - *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, - &hrxq_idx); - } - return hrxq_idx; -} - /** * Apply the flow to the NIC, lock free, * (mutex should be acquired by caller). @@ -10640,16 +10730,10 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, int err; int idx; struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); - struct mlx5_flow_rss_desc *rss_desc = - &wks->rss_desc[!!wks->flow_nested_idx]; + struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc; MLX5_ASSERT(wks); - if (rss_desc->shared_rss) { - dh = wks->flows[wks->flow_idx - 1].handle; - MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS); - dh->rix_srss = rss_desc->shared_rss; - } - for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) { + for (idx = wks->flow_idx - 1; idx >= 0; idx--) { dev_flow = &wks->flows[idx]; dv = &dev_flow->dv; dh = dev_flow->handle; @@ -10664,11 +10748,34 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, priv->drop_queue.hrxq->action; } } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE && - !dv_h->rix_sample && !dv_h->rix_dest_array) || - (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) { + !dv_h->rix_sample && !dv_h->rix_dest_array)) { + struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx; + + hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, + &hrxq_idx); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot get hash queue"); + goto error; + } + dh->rix_hrxq = hrxq_idx; + dv->actions[n++] = hrxq->action; + } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { struct mlx5_hrxq *hrxq = NULL; - uint32_t hrxq_idx = __flow_dv_rss_get_hrxq - (dev, dev_flow, rss_desc, &hrxq); + uint32_t hrxq_idx; + + hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, + rss_desc->shared_rss, + dev_flow->hash_fields, + !!(dh->layers & + MLX5_FLOW_LAYER_TUNNEL)); + if (hrxq_idx) + hrxq = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_HRXQ], + hrxq_idx); if (!hrxq) { rte_flow_error_set (error, rte_errno, @@ -10676,8 +10783,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get hash queue"); goto error; } - if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) - dh->rix_hrxq = hrxq_idx; + dh->rix_srss = rss_desc->shared_rss; dv->actions[n++] = hrxq->action; } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { if (!priv->sh->default_miss_action) { @@ -10719,12 +10825,12 @@ error: if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { mlx5_hrxq_release(dev, dh->rix_hrxq); dh->rix_hrxq = 0; + } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { + dh->rix_srss = 0; } if (dh->vf_vlan.tag && dh->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); } - if (rss_desc->shared_rss) - wks->flows[wks->flow_idx - 1].handle->rix_srss = 0; rte_errno = err; /* Restore rte_errno. 
*/ return -rte_errno; } @@ -10992,9 +11098,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev, flow_dv_port_id_action_resource_release(dev, handle->rix_port_id_action); break; - case MLX5_FLOW_FATE_SHARED_RSS: - flow_dv_shared_rss_action_release(dev, handle->rix_srss); - break; default: DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); break; @@ -11003,20 +11106,20 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev, } void -flow_dv_sample_remove_cb(struct mlx5_cache_list *list, +flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused, struct mlx5_cache_entry *entry) { - struct rte_eth_dev *dev = list->ctx; - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_dv_sample_resource *cache_resource = container_of(entry, typeof(*cache_resource), entry); + struct rte_eth_dev *dev = cache_resource->dev; + struct mlx5_priv *priv = dev->data->dev_private; if (cache_resource->verbs_action) - claim_zero(mlx5_glue->destroy_flow_action + claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->verbs_action)); if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { if (cache_resource->default_miss) - claim_zero(mlx5_glue->destroy_flow_action + claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->default_miss)); } if (cache_resource->normal_path_tbl) @@ -11058,18 +11161,18 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev, } void -flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list, +flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused, struct mlx5_cache_entry *entry) { - struct rte_eth_dev *dev = list->ctx; - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_dv_dest_array_resource *cache_resource = container_of(entry, typeof(*cache_resource), entry); + struct rte_eth_dev *dev = cache_resource->dev; + struct mlx5_priv *priv = dev->data->dev_private; uint32_t i = 0; MLX5_ASSERT(cache_resource->action); if (cache_resource->action) - claim_zero(mlx5_glue->destroy_flow_action + claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->action)); for (; i < cache_resource->num_of_dest; i++) flow_dv_sample_sub_actions_release(dev, @@ -11157,6 +11260,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + uint32_t srss = 0; if (!flow) return; @@ -11201,10 +11305,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) if (dev_handle->dvh.rix_tag) flow_dv_tag_release(dev, dev_handle->dvh.rix_tag); - flow_dv_fate_resource_release(dev, dev_handle); + if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS) + flow_dv_fate_resource_release(dev, dev_handle); + else if (!srss) + srss = dev_handle->rix_srss; mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], tmp_idx); } + if (srss) + flow_dv_shared_rss_action_release(dev, srss); } /** @@ -11278,20 +11387,26 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, */ static int __flow_dv_action_rss_setup(struct rte_eth_dev *dev, - uint32_t action_idx, - struct mlx5_shared_action_rss *action, - struct rte_flow_error *error) + uint32_t action_idx, + struct mlx5_shared_action_rss *action, + struct rte_flow_error *error) { struct mlx5_flow_rss_desc rss_desc = { 0 }; size_t i; int err; + if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) { + return rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot setup indirection table"); + } memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN); 
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN; rss_desc.const_q = action->origin.queue; rss_desc.queue_num = action->origin.queue_num; /* Set non-zero value to indicate a shared RSS. */ rss_desc.shared_rss = action_idx; + rss_desc.ind_tbl = action->ind_tbl; for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { uint32_t hrxq_idx; uint64_t hash_fields = mlx5_rss_hash_fields[i]; @@ -11317,6 +11432,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, error_hrxq_new: err = rte_errno; __flow_dv_action_rss_hrxqs_release(dev, action); + if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true)) + action->ind_tbl = NULL; rte_errno = err; return -rte_errno; } @@ -11369,7 +11486,18 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, "rss action number out of range"); goto error_rss_init; } - shared_action->queue = queue; + shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(*shared_action->ind_tbl), + 0, SOCKET_ID_ANY); + if (!shared_action->ind_tbl) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + goto error_rss_init; + } + memcpy(queue, rss->queue, queue_size); + shared_action->ind_tbl->queues = queue; + shared_action->ind_tbl->queues_n = rss->queue_num; origin = &shared_action->origin; origin->func = rss->func; origin->level = rss->level; @@ -11380,11 +11508,11 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN); origin->key = &shared_action->key[0]; origin->key_len = MLX5_RSS_HASH_KEY_LEN; - memcpy(shared_action->queue, rss->queue, queue_size); - origin->queue = shared_action->queue; + origin->queue = queue; origin->queue_num = rss->queue_num; if (__flow_dv_action_rss_setup(dev, idx, shared_action, error)) goto error_rss_init; + rte_spinlock_init(&shared_action->action_rss_sl); __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED); rte_spinlock_lock(&priv->shared_act_sl); ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], @@ -11392,9 +11520,12 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev, rte_spinlock_unlock(&priv->shared_act_sl); return idx; error_rss_init: - if (shared_action) + if (shared_action) { + if (shared_action->ind_tbl) + mlx5_free(shared_action->ind_tbl); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); + } if (queue) mlx5_free(queue); return 0; @@ -11424,6 +11555,7 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); uint32_t old_refcnt = 1; int remaining; + uint16_t *queue = NULL; if (!shared_rss) return rte_flow_error_set(error, EINVAL, @@ -11431,18 +11563,26 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, "invalid shared action"); remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss); if (remaining) - return rte_flow_error_set(error, ETOOMANYREFS, + return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "shared rss hrxq has references"); + queue = shared_rss->ind_tbl->queues; + remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true); + if (remaining) + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "shared rss indirection table has" + " references"); if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt, 0, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) - return rte_flow_error_set(error, ETOOMANYREFS, + return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "shared rss has 
references"); - rte_free(shared_rss->queue); + mlx5_free(queue); rte_spinlock_lock(&priv->shared_act_sl); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &priv->rss_shared_actions, idx, shared_rss, next); @@ -11487,7 +11627,7 @@ flow_dv_action_create(struct rte_eth_dev *dev, MLX5_SHARED_ACTION_TYPE_OFFSET) | ret; break; case RTE_FLOW_ACTION_TYPE_AGE: - ret = flow_dv_translate_create_aso_age(dev, action->conf); + ret = flow_dv_translate_create_aso_age(dev, action->conf, err); idx = (MLX5_SHARED_ACTION_TYPE_AGE << MLX5_SHARED_ACTION_TYPE_OFFSET) | ret; if (ret) { @@ -11580,11 +11720,9 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); - size_t i; - int ret; + int ret = 0; void *queue = NULL; - const uint8_t *rss_key; - uint32_t rss_key_len; + uint16_t *queue_old = NULL; uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t); if (!shared_rss) @@ -11599,42 +11737,24 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); - if (action_conf->key) { - rss_key = action_conf->key; - rss_key_len = action_conf->key_len; + memcpy(queue, action_conf->queue, queue_size); + MLX5_ASSERT(shared_rss->ind_tbl); + rte_spinlock_lock(&shared_rss->action_rss_sl); + queue_old = shared_rss->ind_tbl->queues; + ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl, + queue, action_conf->queue_num, true); + if (ret) { + mlx5_free(queue); + ret = rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "cannot update indirection table"); } else { - rss_key = rss_hash_default_key; - rss_key_len = MLX5_RSS_HASH_KEY_LEN; - } - for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { - uint32_t hrxq_idx; - uint64_t hash_fields = mlx5_rss_hash_fields[i]; - int tunnel; - - for (tunnel = 0; tunnel < 2; tunnel++) { - hrxq_idx = __flow_dv_action_rss_hrxq_lookup - (dev, idx, hash_fields, tunnel); - MLX5_ASSERT(hrxq_idx); - ret = mlx5_hrxq_modify - (dev, hrxq_idx, - rss_key, rss_key_len, - hash_fields, - action_conf->queue, action_conf->queue_num); - if (ret) { - mlx5_free(queue); - return rte_flow_error_set - (error, rte_errno, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "cannot update hash queue"); - } - } + mlx5_free(queue_old); + shared_rss->origin.queue = queue; + shared_rss->origin.queue_num = action_conf->queue_num; } - mlx5_free(shared_rss->queue); - shared_rss->queue = queue; - memcpy(shared_rss->queue, action_conf->queue, queue_size); - shared_rss->origin.queue = shared_rss->queue; - shared_rss->origin.queue_num = action_conf->queue_num; - return 0; + rte_spinlock_unlock(&shared_rss->action_rss_sl); + return ret; } /** @@ -12514,18 +12634,18 @@ flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags) int ret = 0; if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) { - ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, + ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain, flags); if (ret != 0) return ret; } if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) { - ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags); + ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags); if (ret != 0) return ret; } if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) { - ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags); + ret = 
mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags); if (ret != 0) return ret; }
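
A note on the encap/decap registration hunk above: the patch stops packing a unique 64-bit key (type, buffer size, table level, checksum) into the cache entry and instead derives the hash-list key by chaining __rte_raw_cksum() over a small fixed descriptor and then over the reformat buffer (the buffer part is skipped for the L2_TUNNEL_TO_L2 decap type, which carries no buffer). Such a key is a hash rather than a unique identifier, which is why the match callback is also changed to drop the entry.key comparison and compare the resource fields themselves. Below is a minimal standalone sketch of that keying pattern; struct reformat_desc and reformat_hash_key() are hypothetical stand-ins for illustration, not the driver's actual mlx5 structures.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#include <rte_ip.h> /* __rte_raw_cksum() */

/* Hypothetical stand-in for the fixed part of the cache key. */
struct reformat_desc {
	uint32_t ft_type:8;    /* flow table type */
	uint32_t refmt_type:8; /* packet reformat type */
	uint32_t is_root:1;    /* root-table actions cannot be shared */
	uint32_t reserve:15;
};

/*
 * Derive the hash-list key the way the patch does: checksum the fixed
 * 32-bit descriptor first, then chain the checksum over the variable
 * size reformat buffer. The result is a hash, not a unique ID.
 */
static uint64_t
reformat_hash_key(const struct reformat_desc *desc,
		  const uint8_t *buf, size_t size)
{
	uint32_t v32;
	uint32_t sum;

	memcpy(&v32, desc, sizeof(v32));
	sum = __rte_raw_cksum(&v32, sizeof(v32), 0);
	if (size != 0)
		sum = __rte_raw_cksum(buf, size, sum);
	return sum; /* widened to the 64-bit mlx5_hlist key */
}

Because two different reformat configurations can checksum to the same key, collisions are expected and resolved by the list's match callback; that is why flow_dv_encap_decap_match_cb() in this patch compares reformat_type, ft_type, flags and size field by field instead of trusting a stored key.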