diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 01b6e7ce86..20ff1fb5dd 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -81,6 +81,8 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, static int flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, uint32_t port_id); +static void +flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss); /** * Initialize flow attributes structure according to flow items' types. @@ -1215,6 +1217,7 @@ flow_dv_convert_action_set_meta if (reg < 0) return reg; + MLX5_ASSERT(reg != REG_NON); /* * In datapath code there are no endianness * conversions for performance reasons, all @@ -1436,6 +1439,10 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, reg = flow_dv_get_metadata_reg(dev, attr, error); if (reg < 0) return reg; + if (reg == REG_NON) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "unavailable extended metadata register"); if (reg == REG_B) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -2444,6 +2451,10 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, reg = flow_dv_get_metadata_reg(dev, attr, error); if (reg < 0) return reg; + if (reg == REG_NON) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "unavailable extended metadata register"); if (reg != REG_A && reg != REG_B) { struct mlx5_priv *priv = dev->data->dev_private; @@ -2769,8 +2780,7 @@ flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused, cache_resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource, entry); - if (resource->entry.key == cache_resource->entry.key && - resource->reformat_type == cache_resource->reformat_type && + if (resource->reformat_type == cache_resource->reformat_type && resource->ft_type == cache_resource->ft_type && resource->flags == cache_resource->flags && resource->size == cache_resource->size && @@ -2863,26 +2873,41 @@ flow_dv_encap_decap_resource_register struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_hlist_entry *entry; - union mlx5_flow_encap_decap_key encap_decap_key = { + union { + struct { + uint32_t ft_type:8; + uint32_t refmt_type:8; + /* + * Header reformat actions can be shared between + * non-root tables. One bit to indicate non-root + * table or not. + */ + uint32_t is_root:1; + uint32_t reserve:15; + }; + uint32_t v32; + } encap_decap_key = { { .ft_type = resource->ft_type, .refmt_type = resource->reformat_type, - .buf_size = resource->size, - .table_level = !!dev_flow->dv.group, - .cksum = 0, + .is_root = !!dev_flow->dv.group, + .reserve = 0, } }; struct mlx5_flow_cb_ctx ctx = { .error = error, .data = resource, }; + uint64_t key64; resource->flags = dev_flow->dv.group ?
0 : 1; - encap_decap_key.cksum = __rte_raw_cksum(resource->buf, - resource->size, 0); - resource->entry.key = encap_decap_key.v64; - entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key, - &ctx); + key64 = __rte_raw_cksum(&encap_decap_key.v32, + sizeof(encap_decap_key.v32), 0); + if (resource->reformat_type != + MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 && + resource->size) + key64 = __rte_raw_cksum(resource->buf, resource->size, key64); + entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx); if (!entry) return -rte_errno; resource = container_of(entry, typeof(*resource), entry); @@ -3933,7 +3958,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, target_group = ((const struct rte_flow_action_jump *)action->conf)->group; ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table, - grp_info, error); + &grp_info, error); if (ret) return ret; if (attributes->group == target_group && @@ -4127,7 +4152,8 @@ flow_dv_validate_action_age(uint64_t action_flags, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_age *age = action->conf; - if (!priv->config.devx || priv->sh->cmng.counter_fallback) + if (!priv->config.devx || (priv->sh->cmng.counter_fallback && + !priv->sh->aso_age_mng)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -4505,6 +4531,7 @@ flow_dv_modify_hdr_resource_register .error = error, .data = resource, }; + uint64_t key64; resource->flags = dev_flow->dv.group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; @@ -4513,8 +4540,8 @@ flow_dv_modify_hdr_resource_register return rte_flow_error_set(error, EOVERFLOW, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many modify header items"); - resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0); - entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx); + key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0); + entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx); if (!entry) return -rte_errno; resource = container_of(entry, typeof(*resource), entry); @@ -5100,7 +5127,7 @@ static int flow_dv_validate_attributes(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, const struct rte_flow_attr *attributes, - struct flow_grp_info grp_info, + const struct flow_grp_info *grp_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -5253,9 +5280,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } else { tunnel = NULL; } + if (tunnel && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "decap not supported " + "for VF representor"); grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate (dev, tunnel, attr, items, actions); - ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error); + ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error); if (ret < 0) return ret; is_root = (uint64_t)ret; @@ -5925,6 +5957,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, /* Meter action will add one more TAG action. 
*/ rw_act_num += MLX5_ACT_NUM_SET_TAG; break; + case MLX5_RTE_FLOW_ACTION_TYPE_AGE: + if (!attr->transfer && !attr->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Shared ASO age action is not supported for group 0"); + action_flags |= MLX5_FLOW_ACTION_AGE; + ++actions_n; + break; case RTE_FLOW_ACTION_TYPE_AGE: ret = flow_dv_validate_action_age(action_flags, actions, dev, @@ -6194,8 +6235,9 @@ flow_dv_prepare(struct rte_eth_dev *dev, "not enough memory to create flow handle"); return NULL; } - MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows)); dev_flow = &wks->flows[wks->flow_idx++]; + memset(dev_flow, 0, sizeof(*dev_flow)); dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; /* @@ -6207,12 +6249,6 @@ flow_dv_prepare(struct rte_eth_dev *dev, */ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) - MLX5_ST_SZ_BYTES(fte_match_set_misc4); - /* - * The matching value needs to be cleared to 0 before using. In the - * past, it will be automatically cleared when using rte_*alloc - * API. The time consumption will be almost the same as before. - */ - memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param)); dev_flow->ingress = attr->ingress; dev_flow->dv.transfer = attr->transfer; return dev_flow; @@ -7442,6 +7478,7 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev, reg = flow_dv_get_metadata_reg(dev, attr, NULL); if (reg < 0) return; + MLX5_ASSERT(reg != REG_NON); /* * In datapath code there are no endianness * conversions for performance reasons, all @@ -7584,13 +7621,16 @@ flow_dv_translate_item_source_vport(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. + * @param[in] attr + * Flow attributes. * * @return * 0 on success, a negative errno value otherwise. */ static int flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, - void *key, const struct rte_flow_item *item) + void *key, const struct rte_flow_item *item, + const struct rte_flow_attr *attr) { const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL; const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL; @@ -7602,14 +7642,30 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, priv = mlx5_port_to_eswitch_info(id, item == NULL); if (!priv) return -rte_errno; - /* Translate to vport field or to metadata, depending on mode. */ - if (priv->vport_meta_mask) - flow_dv_translate_item_meta_vport(matcher, key, - priv->vport_meta_tag, - priv->vport_meta_mask); - else + /* + * Translate to vport field or to metadata, depending on mode. + * Kernel can use either misc.source_port or half of C0 metadata + * register. + */ + if (priv->vport_meta_mask) { + /* + * Provide the hint for SW steering library + * to insert the flow into ingress domain and + * save the extra vport match.
+ */ + if (mask == 0xffff && priv->vport_id == 0xffff && + priv->pf_bond < 0 && attr->transfer) + flow_dv_translate_item_source_vport + (matcher, key, priv->vport_id, mask); + else + flow_dv_translate_item_meta_vport + (matcher, key, + priv->vport_meta_tag, + priv->vport_meta_mask); + } else { flow_dv_translate_item_source_vport(matcher, key, priv->vport_id, mask); + } return 0; } @@ -7798,6 +7854,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_item_ecpri *ecpri_m = item->mask; const struct rte_flow_item_ecpri *ecpri_v = item->spec; + struct rte_ecpri_common_hdr common; void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_4); void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4); @@ -7826,7 +7883,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, prog_sample_field_value_0); /* Already big endian (network order) in the header. */ *(uint32_t *)dw_m = ecpri_m->hdr.common.u32; - *(uint32_t *)dw_v = ecpri_v->hdr.common.u32; + *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32; /* Sample#0, used for matching type, offset 0. */ MLX5_SET(fte_match_set_misc4, misc4_m, prog_sample_field_id_0, samples[0]); @@ -7838,7 +7895,8 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, * Some wildcard rules only matching type field should be supported. */ if (ecpri_m->hdr.dummy[0]) { - switch (ecpri_v->hdr.common.type) { + common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32); + switch (common.type) { case RTE_ECPRI_MSG_TYPE_IQ_DATA: case RTE_ECPRI_MSG_TYPE_RTC_CTRL: case RTE_ECPRI_MSG_TYPE_DLY_MSR: @@ -7847,7 +7905,8 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, prog_sample_field_value_1); *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0]; - *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0]; + *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] & + ecpri_m->hdr.dummy[0]; /* Sample#1, to match message body, offset 4. */ MLX5_SET(fte_match_set_misc4, misc4_m, prog_sample_field_id_1, samples[1]); @@ -7928,9 +7987,12 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx) tbl_data->idx = idx; tbl_data->tunnel = tt_prm->tunnel; tbl_data->group_id = tt_prm->group_id; - tbl_data->external = tt_prm->external; + tbl_data->external = !!tt_prm->external; tbl_data->tunnel_offload = is_tunnel_offload_active(dev); tbl_data->is_egress = !!key.direction; + tbl_data->is_transfer = !!key.domain; + tbl_data->dummy = !!key.dummy; + tbl_data->table_id = key.table_id; tbl = &tbl_data->tbl; if (key.dummy) return &tbl_data->entry; @@ -7971,6 +8033,21 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx) return &tbl_data->entry; } +int +flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, uint64_t key64, + void *cb_ctx __rte_unused) +{ + struct mlx5_flow_tbl_data_entry *tbl_data = + container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + union mlx5_flow_tbl_key key = { .v64 = key64 }; + + return tbl_data->table_id != key.table_id || + tbl_data->dummy != key.dummy || + tbl_data->is_transfer != key.domain || + tbl_data->is_egress != key.direction; +} + /** * Get a flow table. * @@ -8028,6 +8105,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, "cannot get table"); return NULL; } + DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.", + table_id, tunnel ? 
tunnel->tunnel_id : 0, group_id); tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry); return &tbl_data->tbl; } @@ -8054,10 +8133,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list, tbl_data->tunnel->tunnel_id : 0, .group = tbl_data->group_id }; - union mlx5_flow_tbl_key table_key = { - .v64 = entry->key - }; - uint32_t table_id = table_key.table_id; + uint32_t table_id = tbl_data->table_id; tunnel_grp_hash = tbl_data->tunnel ? tbl_data->tunnel->groups : @@ -8066,7 +8142,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list, if (he) mlx5_hlist_unregister(tunnel_grp_hash, he); DRV_LOG(DEBUG, - "Table_id %#x tunnel %u group %u released.", + "Table_id %u tunnel %u group %u released.", table_id, tbl_data->tunnel ? tbl_data->tunnel->tunnel_id : 0, @@ -8178,6 +8254,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, struct mlx5_flow_dv_matcher *ref, union mlx5_flow_tbl_key *key, struct mlx5_flow *dev_flow, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, struct rte_flow_error *error) { struct mlx5_cache_entry *entry; @@ -8189,8 +8267,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, .data = ref, }; - tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, - key->domain, false, NULL, 0, 0, error); + /** + * tunnel offload API requires this registration for cases when + * tunnel match rule was inserted before tunnel set rule. + */ + tbl = flow_dv_tbl_resource_get(dev, key->table_id, + key->direction, key->domain, + dev_flow->external, tunnel, + group_id, 0, error); if (!tbl) return -rte_errno; /* No need to refill the error info */ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); @@ -8224,6 +8308,7 @@ flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) return NULL; } entry->idx = idx; + entry->tag_id = key; ret = mlx5_flow_os_create_flow_action_tag(key, &entry->action); if (ret) { @@ -8236,6 +8321,17 @@ flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) return &entry->entry; } +int +flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, uint64_t key, + void *cb_ctx __rte_unused) +{ + struct mlx5_flow_dv_tag_resource *tag = + container_of(entry, struct mlx5_flow_dv_tag_resource, entry); + + return key != tag->tag_id; +} + /** * Find existing tag resource or create and register a new one. 
* @@ -8400,6 +8496,7 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev, __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED); return counter; } + /** * Add Tx queue matcher * @@ -8545,7 +8642,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN; rss_desc->hash_fields = dev_flow->hash_fields; rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL); - rss_desc->standalone = false; + rss_desc->shared_rss = 0; *hrxq_idx = mlx5_hrxq_get(dev, rss_desc); if (!*hrxq_idx) return NULL; @@ -8696,6 +8793,7 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused, goto error; } cache_resource->idx = idx; + cache_resource->dev = dev; return &cache_resource->entry; error: if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB && @@ -8858,6 +8956,7 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused, goto error; } cache_resource->idx = res_idx; + cache_resource->dev = dev; for (idx = 0; idx < resource->num_of_dest; idx++) mlx5_free(dest_attr[idx]); return &cache_resource->entry; @@ -8968,7 +9067,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, uint64_t action_flags = 0; MLX5_ASSERT(wks); - rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; + rss_desc = &wks->rss_desc; sample_act = &res->sample_act; sample_idx = &res->sample_idx; sample_action = (const struct rte_flow_action_sample *)action->conf; @@ -9180,7 +9279,7 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev, uint32_t hrxq_idx; MLX5_ASSERT(wks); - rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; + rss_desc = &wks->rss_desc; if (num_of_dest > 1) { if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) { /* Handle QP action for mirroring */ @@ -9238,6 +9337,273 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev, return 0; } +/** + * Remove an ASO age action from the age actions list. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] age + * Pointer to the ASO age action handler. + */ +static void +flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev, + struct mlx5_aso_age_action *age) +{ + struct mlx5_age_info *age_info; + struct mlx5_age_param *age_param = &age->age_params; + struct mlx5_priv *priv = dev->data->dev_private; + uint16_t expected = AGE_CANDIDATE; + + age_info = GET_PORT_AGE_INFO(priv); + if (!__atomic_compare_exchange_n(&age_param->state, &expected, + AGE_FREE, false, __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { + /** + * We need the lock even if it is age timeout, + * since the age action may still be in process. + */ + rte_spinlock_lock(&age_info->aged_sl); + LIST_REMOVE(age, next); + rte_spinlock_unlock(&age_info->aged_sl); + __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED); + } +} + +/** + * Release an ASO age action. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] age_idx + * Index of ASO age action to release. + * + * @return + * 0 when age action was removed, otherwise the number of references.
*/ +static int +flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx); + uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED); + + if (!ret) { + flow_dv_aso_age_remove_from_age(dev, age); + rte_spinlock_lock(&mng->free_sl); + LIST_INSERT_HEAD(&mng->free, age, next); + rte_spinlock_unlock(&mng->free_sl); + } + return ret; +} + +/** + * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * + * @return + * 0 on success, otherwise negative errno value and rte_errno is set. + */ +static int +flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + void *old_pools = mng->pools; + uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE; + uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize; + void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); + + if (!pools) { + rte_errno = ENOMEM; + return -ENOMEM; + } + if (old_pools) { + memcpy(pools, old_pools, + mng->n * sizeof(struct mlx5_aso_age_pool *)); + mlx5_free(old_pools); + } else { + /* First ASO flow hit allocation - starting ASO data-path. */ + int ret = mlx5_aso_queue_start(priv->sh); + + if (ret) { + mlx5_free(pools); + return ret; + } + } + mng->n = resize; + mng->pools = pools; + return 0; +} + +/** + * Create and initialize a new ASO aging pool. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[out] age_free + * Where to put the pointer of a new age action. + * + * @return + * The age actions pool pointer and @p age_free is set on success, + * NULL otherwise and rte_errno is set. + */ +static struct mlx5_aso_age_pool * +flow_dv_age_pool_create(struct rte_eth_dev *dev, + struct mlx5_aso_age_action **age_free) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_pool *pool = NULL; + struct mlx5_devx_obj *obj = NULL; + uint32_t i; + + obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx, + priv->sh->pdn); + if (!obj) { + rte_errno = ENODATA; + DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX."); + return NULL; + } + pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY); + if (!pool) { + claim_zero(mlx5_devx_cmd_destroy(obj)); + rte_errno = ENOMEM; + return NULL; + } + pool->flow_hit_aso_obj = obj; + pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; + rte_spinlock_lock(&mng->resize_sl); + pool->index = mng->next; + /* Resize pools array if there is no room for the new pool in it. */ + if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) { + claim_zero(mlx5_devx_cmd_destroy(obj)); + mlx5_free(pool); + rte_spinlock_unlock(&mng->resize_sl); + return NULL; + } + mng->pools[pool->index] = pool; + mng->next++; + rte_spinlock_unlock(&mng->resize_sl); + /* Assign the first action in the new pool, the rest go to free list. */ + *age_free = &pool->actions[0]; + for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) { + pool->actions[i].offset = i; + LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next); + } + return pool; +} + +/** + * Allocate an ASO aging bit. + * + * @param[in] dev + * Pointer to the Ethernet device structure.
+ * @param[out] error + * Pointer to the error structure. + * + * @return + * Index to ASO age action on success, 0 otherwise and rte_errno is set. + */ +static uint32_t +flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct mlx5_aso_age_pool *pool; + struct mlx5_aso_age_action *age_free = NULL; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + + MLX5_ASSERT(mng); + /* Try to get the next free age action bit. */ + rte_spinlock_lock(&mng->free_sl); + age_free = LIST_FIRST(&mng->free); + if (age_free) { + LIST_REMOVE(age_free, next); + } else if (!flow_dv_age_pool_create(dev, &age_free)) { + rte_spinlock_unlock(&mng->free_sl); + rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "failed to create ASO age pool"); + return 0; /* 0 is an error. */ + } + rte_spinlock_unlock(&mng->free_sl); + pool = container_of + ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL]) + (age_free - age_free->offset), const struct mlx5_aso_age_pool, + actions); + if (!age_free->dr_action) { + int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0, + error); + + if (reg_c < 0) { + rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "failed to get reg_c " + "for ASO flow hit"); + return 0; /* 0 is an error. */ + } +#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO + age_free->dr_action = mlx5_glue->dv_create_flow_action_aso + (priv->sh->rx_domain, + pool->flow_hit_aso_obj->obj, age_free->offset, + MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET, + (reg_c - REG_C_0)); +#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */ + if (!age_free->dr_action) { + rte_errno = errno; + rte_spinlock_lock(&mng->free_sl); + LIST_INSERT_HEAD(&mng->free, age_free, next); + rte_spinlock_unlock(&mng->free_sl); + rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "failed to create ASO " + "flow hit action"); + return 0; /* 0 is an error. */ + } + } + __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED); + return pool->index | ((age_free->offset + 1) << 16); +} + +/** + * Create an age action using the ASO mechanism. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] age + * Pointer to the aging action configuration. + * @param[out] error + * Pointer to the error structure. + * + * @return + * Index to the ASO age action on success, 0 otherwise. + */ +static uint32_t +flow_dv_translate_create_aso_age(struct rte_eth_dev *dev, + const struct rte_flow_action_age *age, + struct rte_flow_error *error) +{ + uint32_t age_idx = 0; + struct mlx5_aso_age_action *aso_age; + + age_idx = flow_dv_aso_age_alloc(dev, error); + if (!age_idx) + return 0; + aso_age = flow_aso_age_get_by_idx(dev, age_idx); + aso_age->age_params.context = age->context; + aso_age->age_params.timeout = age->timeout; + aso_age->age_params.port_id = dev->data->port_id; + __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0, + __ATOMIC_RELAXED); + __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE, + __ATOMIC_RELAXED); + return age_idx; +} + /** * Fill the flow with DV spec, lock free * (mutex should be acquired by caller).
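The return statement of flow_dv_aso_age_alloc() above packs the whole ASO age reference into one uint32_t: the pool index sits in the low 16 bits and the in-pool offset, biased by one so that zero stays reserved as the error value, sits in the high 16 bits; flow_aso_age_get_by_idx() reverses that packing. A minimal standalone sketch of the scheme follows; the helper names are hypothetical, only the bit layout mirrors the patch:

#include <assert.h>
#include <stdint.h>

/* Pack a pool index and an in-pool action offset into one age index. */
static inline uint32_t
aso_age_idx_pack(uint16_t pool_index, uint16_t offset)
{
	/* Bias the offset by one so that 0 can mean "allocation failed". */
	return (uint32_t)pool_index | (((uint32_t)offset + 1) << 16);
}

/* Recover the pool index and the in-pool action offset. */
static inline void
aso_age_idx_unpack(uint32_t age_idx, uint16_t *pool_index, uint16_t *offset)
{
	assert(age_idx != 0); /* 0 is reserved as the error value. */
	*pool_index = (uint16_t)(age_idx & UINT16_MAX);
	*offset = (uint16_t)((age_idx >> 16) - 1);
}

The bias also explains why callers such as flow_dv_translate() can simply test the returned index for zero before storing it in flow->age.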
@@ -9315,10 +9681,15 @@ flow_dv_translate(struct rte_eth_dev *dev, .external = !!dev_flow->external, .transfer = !!attr->transfer, .fdb_def_rule = !!priv->fdb_def_rule, + .skip_scale = !!dev_flow->skip_scale, }; - MLX5_ASSERT(wks); - rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; + if (!wks) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "failed to push flow workspace"); + rss_desc = &wks->rss_desc; memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource)); memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource)); mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : @@ -9335,7 +9706,7 @@ flow_dv_translate(struct rte_eth_dev *dev, grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate (dev, tunnel, attr, items, actions); ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table, - grp_info, error); + &grp_info, error); if (ret) return ret; dev_flow->dv.group = table; @@ -9349,10 +9720,14 @@ flow_dv_translate(struct rte_eth_dev *dev, /* * do not add decap action if match rule drops packet * HW rejects rules with decap & drop + * + * If the tunnel match rule was inserted before the matching tunnel set + * rule, the flow table used in the match rule must be registered. + * The current implementation handles that in + * flow_dv_matcher_register() at the function end. */ bool add_decap = true; const struct rte_flow_action *ptr = actions; - struct mlx5_flow_tbl_resource *tbl; for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) { if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) { @@ -9369,20 +9744,6 @@ flow_dv_translate(struct rte_eth_dev *dev, dev_flow->dv.encap_decap->action; action_flags |= MLX5_FLOW_ACTION_DECAP; } - /* - * bind table_id with for tunnel match rule. - * Tunnel set rule establishes that bind in JUMP action handler. - * Required for scenario when application creates tunnel match - * rule before tunnel set rule. - */ - tbl = flow_dv_tbl_resource_get(dev, table, attr->egress, - attr->transfer, - !!dev_flow->external, tunnel, - attr->group, 0, error); - if (!tbl) - return rte_flow_error_set - (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, - actions, "cannot register tunnel group"); } for (; !actions_end ; actions++) { const struct rte_flow_action_queue *queue; @@ -9391,6 +9752,7 @@ flow_dv_translate(struct rte_eth_dev *dev, const uint8_t *rss_key; const struct rte_flow_action_meter *mtr; struct mlx5_flow_tbl_resource *tbl; + struct mlx5_aso_age_action *age_act; uint32_t port_id = 0; struct mlx5_flow_dv_port_id_action_resource port_id_resource; int action_type = actions->type; @@ -9525,9 +9887,35 @@ flow_dv_translate(struct rte_eth_dev *dev, * when expanding items for RSS. */ action_flags |= MLX5_FLOW_ACTION_RSS; - dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + dev_flow->handle->fate_action = rss_desc->shared_rss ?
+ MLX5_FLOW_FATE_SHARED_RSS : + MLX5_FLOW_FATE_QUEUE; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_AGE: + flow->age = (uint32_t)(uintptr_t)(action->conf); + age_act = flow_aso_age_get_by_idx(dev, flow->age); + __atomic_fetch_add(&age_act->refcnt, 1, + __ATOMIC_RELAXED); + dev_flow->dv.actions[actions_n++] = age_act->dr_action; + action_flags |= MLX5_FLOW_ACTION_AGE; break; case RTE_FLOW_ACTION_TYPE_AGE: + if (priv->sh->flow_hit_aso_en && attr->group) { + flow->age = flow_dv_translate_create_aso_age + (dev, action->conf, error); + if (!flow->age) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "can't create ASO age action"); + dev_flow->dv.actions[actions_n++] = + (flow_aso_age_get_by_idx + (dev, flow->age))->dr_action; + action_flags |= MLX5_FLOW_ACTION_AGE; + break; + } + /* Fall-through */ case RTE_FLOW_ACTION_TYPE_COUNT: if (!dev_conf->devx) { return rte_flow_error_set @@ -9651,10 +10039,11 @@ flow_dv_translate(struct rte_eth_dev *dev, jump_group = ((const struct rte_flow_action_jump *) action->conf)->group; grp_info.std_tbl_fix = 0; + grp_info.skip_scale = 0; ret = mlx5_flow_group_to_table(dev, tunnel, jump_group, &table, - grp_info, error); + &grp_info, error); if (ret) return ret; tbl = flow_dv_tbl_resource_get(dev, table, attr->egress, @@ -9934,8 +10323,8 @@ flow_dv_translate(struct rte_eth_dev *dev, NULL, "item not supported"); switch (item_type) { case RTE_FLOW_ITEM_TYPE_PORT_ID: - flow_dv_translate_item_port_id(dev, match_mask, - match_value, items); + flow_dv_translate_item_port_id + (dev, match_mask, match_value, items, attr); last_item = MLX5_FLOW_ITEM_PORT_ID; break; case RTE_FLOW_ITEM_TYPE_ETH: @@ -10161,7 +10550,7 @@ flow_dv_translate(struct rte_eth_dev *dev, if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && (priv->representor || priv->master)) { if (flow_dv_translate_item_port_id(dev, match_mask, - match_value, NULL)) + match_value, NULL, attr)) return -rte_errno; } #ifdef RTE_LIBRTE_MLX5_DEBUG @@ -10184,7 +10573,8 @@ flow_dv_translate(struct rte_eth_dev *dev, tbl_key.domain = attr->transfer; tbl_key.direction = attr->egress; tbl_key.table_id = dev_flow->dv.group; - if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error)) + if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, + tunnel, attr->group, error)) return -rte_errno; return 0; } @@ -10244,8 +10634,10 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action, * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields) * and tunnel. * - * @param[in] action - * Shred RSS action holding hash RX queue objects. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] idx + * Shared RSS action ID holding hash RX queue objects. * @param[in] hash_fields * Defines combination of packet fields to participate in RX hash. * @param[in] tunnel @@ -10255,11 +10647,15 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action, * Valid hash RX queue index, otherwise 0. */ static uint32_t -__flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action, +__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, const uint64_t hash_fields, const int tunnel) { - const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); + const uint32_t *hrxqs = tunnel ? 
shared_rss->hrxq : + shared_rss->hrxq_tunnel; switch (hash_fields & ~IBV_RX_HASH_INNER) { case MLX5_RSS_HASH_IPV4: @@ -10286,10 +10682,12 @@ __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action, * If shared action configured for *flow* suitable hash RX queue will be * retrieved from attached shared action. * - * @param[in] flow - * Shred RSS action holding hash RX queue objects. + * @param[in] dev + * Pointer to the Ethernet device structure. * @param[in] dev_flow * Pointer to the sub flow. + * @param[in] rss_desc + * Pointer to the RSS descriptor. * @param[out] hrxq * Pointer to retrieved hash RX queue object. * @@ -10297,29 +10695,23 @@ __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action, * Valid hash RX queue index, otherwise 0 and rte_errno is set. */ static uint32_t -__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow, - struct mlx5_flow *dev_flow, - struct mlx5_hrxq **hrxq) +__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, + struct mlx5_flow_rss_desc *rss_desc, + struct mlx5_hrxq **hrxq) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); uint32_t hrxq_idx; - if (flow->shared_rss) { + if (rss_desc->shared_rss) { hrxq_idx = __flow_dv_action_rss_hrxq_lookup - (flow->shared_rss, dev_flow->hash_fields, + (dev, rss_desc->shared_rss, + dev_flow->hash_fields, !!(dev_flow->handle->layers & MLX5_FLOW_LAYER_TUNNEL)); - if (hrxq_idx) { + if (hrxq_idx) *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); - __atomic_fetch_add(&(*hrxq)->refcnt, 1, - __ATOMIC_RELAXED); - } } else { - struct mlx5_flow_rss_desc *rss_desc = - &wks->rss_desc[!!wks->flow_nested_idx]; - *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx); } @@ -10354,9 +10746,15 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, int err; int idx; struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc; MLX5_ASSERT(wks); - for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) { + if (rss_desc->shared_rss) { + dh = wks->flows[wks->flow_idx - 1].handle; + MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS); + dh->rix_srss = rss_desc->shared_rss; + } + for (idx = wks->flow_idx - 1; idx >= 0; idx--) { dev_flow = &wks->flows[idx]; dv = &dev_flow->dv; dh = dev_flow->handle; @@ -10370,11 +10768,12 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, dv->actions[n++] = priv->drop_queue.hrxq->action; } - } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && - !dv_h->rix_sample && !dv_h->rix_dest_array) { + } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE && + !dv_h->rix_sample && !dv_h->rix_dest_array) || + (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) { struct mlx5_hrxq *hrxq = NULL; uint32_t hrxq_idx = __flow_dv_rss_get_hrxq - (dev, flow, dev_flow, &hrxq); + (dev, dev_flow, rss_desc, &hrxq); if (!hrxq) { rte_flow_error_set (error, rte_errno, @@ -10382,7 +10781,8 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get hash queue"); goto error; } - dh->rix_hrxq = hrxq_idx; + if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) + dh->rix_hrxq = hrxq_idx; dv->actions[n++] = hrxq->action; } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { if (!priv->sh->default_miss_action) { @@ -10428,6 +10828,8 @@ error: if (dh->vf_vlan.tag && dh->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); } + if (rss_desc->shared_rss) + 
wks->flows[wks->flow_idx - 1].handle->rix_srss = 0; rte_errno = err; /* Restore rte_errno. */ return -rte_errno; } @@ -10612,6 +11014,25 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, &cache->entry); } +/** + * Release shared RSS action resource. + * + * @param dev + * Pointer to Ethernet device. + * @param srss + * Shared RSS action index. + */ +static void +flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss; + + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss); + __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED); +} + void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list, struct mlx5_cache_entry *entry) @@ -10676,6 +11097,9 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev, flow_dv_port_id_action_resource_release(dev, handle->rix_port_id_action); break; + case MLX5_FLOW_FATE_SHARED_RSS: + flow_dv_shared_rss_action_release(dev, handle->rix_srss); + break; default: DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); break; @@ -10684,20 +11108,20 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev, } void -flow_dv_sample_remove_cb(struct mlx5_cache_list *list, +flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused, struct mlx5_cache_entry *entry) { - struct rte_eth_dev *dev = list->ctx; - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_dv_sample_resource *cache_resource = container_of(entry, typeof(*cache_resource), entry); + struct rte_eth_dev *dev = cache_resource->dev; + struct mlx5_priv *priv = dev->data->dev_private; if (cache_resource->verbs_action) - claim_zero(mlx5_glue->destroy_flow_action + claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->verbs_action)); if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { if (cache_resource->default_miss) - claim_zero(mlx5_glue->destroy_flow_action + claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->default_miss)); } if (cache_resource->normal_path_tbl) @@ -10739,18 +11163,18 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev, } void -flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list, +flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused, struct mlx5_cache_entry *entry) { - struct rte_eth_dev *dev = list->ctx; - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_dv_dest_array_resource *cache_resource = container_of(entry, typeof(*cache_resource), entry); + struct rte_eth_dev *dev = cache_resource->dev; + struct mlx5_priv *priv = dev->data->dev_private; uint32_t i = 0; MLX5_ASSERT(cache_resource->action); if (cache_resource->action) - claim_zero(mlx5_glue->destroy_flow_action + claim_zero(mlx5_flow_os_destroy_flow_action (cache_resource->action)); for (; i < cache_resource->num_of_dest; i++) flow_dv_sample_sub_actions_release(dev, @@ -10836,16 +11260,12 @@ flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) static void flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct rte_flow_shared_action *shared; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; if (!flow) return; flow_dv_remove(dev, flow); - shared = mlx5_flow_get_shared_rss(flow); - if (shared) - __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED); if (flow->counter) { flow_dv_counter_free(dev, flow->counter); flow->counter = 0; @@ -10859,6 +11279,8 @@ flow_dv_destroy(struct rte_eth_dev 
*dev, struct rte_flow *flow) mlx5_flow_meter_detach(fm); flow->meter = 0; } + if (flow->age) + flow_dv_aso_age_release(dev, flow->age); while (flow->dev_handles) { uint32_t tmp_idx = flow->dev_handles; @@ -10948,6 +11370,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, * * @param[in] dev * Pointer to the Ethernet device structure. + * @param[in] action_idx + * Shared RSS action ipool index. * @param[in, out] action * Partially initialized shared RSS action. * @param[out] error @@ -10959,18 +11383,26 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, */ static int __flow_dv_action_rss_setup(struct rte_eth_dev *dev, - struct mlx5_shared_action_rss *action, - struct rte_flow_error *error) + uint32_t action_idx, + struct mlx5_shared_action_rss *action, + struct rte_flow_error *error) { struct mlx5_flow_rss_desc rss_desc = { 0 }; size_t i; int err; + if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) { + return rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot setup indirection table"); + } memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN); rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN; rss_desc.const_q = action->origin.queue; rss_desc.queue_num = action->origin.queue_num; - rss_desc.standalone = true; + /* Set non-zero value to indicate a shared RSS. */ + rss_desc.shared_rss = action_idx; + rss_desc.ind_tbl = action->ind_tbl; for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { uint32_t hrxq_idx; uint64_t hash_fields = mlx5_rss_hash_fields[i]; @@ -10996,6 +11428,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, error_hrxq_new: err = rte_errno; __flow_dv_action_rss_hrxqs_release(dev, action); + if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true)) + action->ind_tbl = NULL; rte_errno = err; return -rte_errno; } @@ -11014,56 +11448,83 @@ error_hrxq_new: * error only. * * @return - * A valid shared action handle in case of success, NULL otherwise and + * A valid shared action ID in case of success, 0 otherwise and * rte_errno is set. 
*/ -static struct rte_flow_shared_action * +static uint32_t __flow_dv_action_rss_create(struct rte_eth_dev *dev, const struct rte_flow_shared_action_conf *conf, const struct rte_flow_action_rss *rss, struct rte_flow_error *error) { - struct rte_flow_shared_action *shared_action = NULL; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_action = NULL; void *queue = NULL; - struct mlx5_shared_action_rss *shared_rss; struct rte_flow_action_rss *origin; const uint8_t *rss_key; uint32_t queue_size = rss->queue_num * sizeof(uint16_t); + uint32_t idx; RTE_SET_USED(conf); queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), 0, SOCKET_ID_ANY); - shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0, - SOCKET_ID_ANY); + shared_action = mlx5_ipool_zmalloc + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx); if (!shared_action || !queue) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); goto error_rss_init; } - shared_rss = &shared_action->rss; - shared_rss->queue = queue; - origin = &shared_rss->origin; + if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) { + rte_flow_error_set(error, E2BIG, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "rss action number out of range"); + goto error_rss_init; + } + shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(*shared_action->ind_tbl), + 0, SOCKET_ID_ANY); + if (!shared_action->ind_tbl) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + goto error_rss_init; + } + memcpy(queue, rss->queue, queue_size); + shared_action->ind_tbl->queues = queue; + shared_action->ind_tbl->queues_n = rss->queue_num; + origin = &shared_action->origin; origin->func = rss->func; origin->level = rss->level; /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ origin->types = !rss->types ? ETH_RSS_IP : rss->types; /* NULL RSS key indicates default RSS key. */ rss_key = !rss->key ? rss_hash_default_key : rss->key; - memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN); - origin->key = &shared_rss->key[0]; + memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN); + origin->key = &shared_action->key[0]; origin->key_len = MLX5_RSS_HASH_KEY_LEN; - memcpy(shared_rss->queue, rss->queue, queue_size); - origin->queue = shared_rss->queue; + origin->queue = queue; origin->queue_num = rss->queue_num; - if (__flow_dv_action_rss_setup(dev, shared_rss, error)) + if (__flow_dv_action_rss_setup(dev, idx, shared_action, error)) goto error_rss_init; - shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS; - return shared_action; + rte_spinlock_init(&shared_action->action_rss_sl); + __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED); + rte_spinlock_lock(&priv->shared_act_sl); + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + &priv->rss_shared_actions, idx, shared_action, next); + rte_spinlock_unlock(&priv->shared_act_sl); + return idx; error_rss_init: - mlx5_free(shared_action); - mlx5_free(queue); - return NULL; + if (shared_action) { + if (shared_action->ind_tbl) + mlx5_free(shared_action->ind_tbl); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + idx); + } + if (queue) + mlx5_free(queue); + return 0; } /** @@ -11072,8 +11533,8 @@ error_rss_init: * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] shared_rss - * The shared RSS action object to be removed. 
+ * @param[in] idx + * The shared RSS action object ID to be removed. * @param[out] error * Perform verbose error reporting if not NULL. Initialized in case of * error only. @@ -11082,31 +11543,48 @@ error_rss_init: * 0 on success, otherwise negative errno value. */ static int -__flow_dv_action_rss_release(struct rte_eth_dev *dev, - struct mlx5_shared_action_rss *shared_rss, - struct rte_flow_error *error) +__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, + struct rte_flow_error *error) { - struct rte_flow_shared_action *shared_action = NULL; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); uint32_t old_refcnt = 1; - int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss); + int remaining; + uint16_t *queue = NULL; - if (remaining) { - return rte_flow_error_set(error, ETOOMANYREFS, + if (!shared_rss) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "invalid shared action"); + remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss); + if (remaining) + return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "shared rss hrxq has references"); - } - shared_action = container_of(shared_rss, - struct rte_flow_shared_action, rss); - if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt, - 0, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { - return rte_flow_error_set(error, ETOOMANYREFS, + queue = shared_rss->ind_tbl->queues; + remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true); + if (remaining) + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "shared rss indirection table has" + " references"); + if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt, + 0, 0, __ATOMIC_ACQUIRE, + __ATOMIC_RELAXED)) + return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "shared rss has references"); - } - rte_free(shared_rss->queue); + mlx5_free(queue); + rte_spinlock_lock(&priv->shared_act_sl); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + &priv->rss_shared_actions, idx, shared_rss, next); + rte_spinlock_unlock(&priv->shared_act_sl); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + idx); return 0; } @@ -11133,30 +11611,36 @@ static struct rte_flow_shared_action * flow_dv_action_create(struct rte_eth_dev *dev, const struct rte_flow_shared_action_conf *conf, const struct rte_flow_action *action, - struct rte_flow_error *error) + struct rte_flow_error *err) { - struct rte_flow_shared_action *shared_action = NULL; - struct mlx5_priv *priv = dev->data->dev_private; + uint32_t idx = 0; + uint32_t ret = 0; switch (action->type) { case RTE_FLOW_ACTION_TYPE_RSS: - shared_action = __flow_dv_action_rss_create(dev, conf, - action->conf, - error); + ret = __flow_dv_action_rss_create(dev, conf, action->conf, err); + idx = (MLX5_SHARED_ACTION_TYPE_RSS << + MLX5_SHARED_ACTION_TYPE_OFFSET) | ret; + break; + case RTE_FLOW_ACTION_TYPE_AGE: + ret = flow_dv_translate_create_aso_age(dev, action->conf, err); + idx = (MLX5_SHARED_ACTION_TYPE_AGE << + MLX5_SHARED_ACTION_TYPE_OFFSET) | ret; + if (ret) { + struct mlx5_aso_age_action *aso_age = + flow_aso_age_get_by_idx(dev, ret); + + if (!aso_age->age_params.context) + aso_age->age_params.context = + (void *)(uintptr_t)idx; + } break; default: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + rte_flow_error_set(err, ENOTSUP, 
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); break; } - if (shared_action) { - __atomic_add_fetch(&shared_action->refcnt, 1, - __ATOMIC_RELAXED); - rte_spinlock_lock(&priv->shared_act_sl); - LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next); - rte_spinlock_unlock(&priv->shared_act_sl); - } - return shared_action; + return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL; } /** @@ -11181,26 +11665,30 @@ flow_dv_action_destroy(struct rte_eth_dev *dev, struct rte_flow_shared_action *action, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); int ret; - switch (action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - ret = __flow_dv_action_rss_release(dev, &action->rss, error); - break; + switch (type) { + case MLX5_SHARED_ACTION_TYPE_RSS: + return __flow_dv_action_rss_release(dev, idx, error); + case MLX5_SHARED_ACTION_TYPE_AGE: + ret = flow_dv_aso_age_release(dev, idx); + if (ret) + /* + * In this case, the last flow holding a reference + * will actually release the age action. + */ + DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was" + " released with references %d.", idx, ret); + return 0; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); } - if (ret) - return ret; - rte_spinlock_lock(&priv->shared_act_sl); - LIST_REMOVE(action, next); - rte_spinlock_unlock(&priv->shared_act_sl); - rte_free(action); - return 0; } /** @@ -11208,8 +11696,8 @@ flow_dv_action_destroy(struct rte_eth_dev *dev, * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] shared_rss - * The shared RSS action object to be updated. + * @param[in] idx + * The shared RSS action object ID to be updated. * @param[in] action_conf * RSS action specification used to modify *shared_rss*. * @param[out] error @@ -11221,18 +11709,22 @@ flow_dv_action_destroy(struct rte_eth_dev *dev, * @note: currently only updating of RSS queues is supported.
*/ static int -__flow_dv_action_rss_update(struct rte_eth_dev *dev, - struct mlx5_shared_action_rss *shared_rss, +__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, const struct rte_flow_action_rss *action_conf, struct rte_flow_error *error) { - size_t i; - int ret; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); + int ret = 0; void *queue = NULL; - const uint8_t *rss_key; - uint32_t rss_key_len; + uint16_t *queue_old = NULL; uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t); + if (!shared_rss) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "invalid shared action to update"); queue = mlx5_malloc(MLX5_MEM_ZERO, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), 0, SOCKET_ID_ANY); @@ -11241,42 +11733,24 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); - if (action_conf->key) { - rss_key = action_conf->key; - rss_key_len = action_conf->key_len; + memcpy(queue, action_conf->queue, queue_size); + MLX5_ASSERT(shared_rss->ind_tbl); + rte_spinlock_lock(&shared_rss->action_rss_sl); + queue_old = shared_rss->ind_tbl->queues; + ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl, + queue, action_conf->queue_num, true); + if (ret) { + mlx5_free(queue); + ret = rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "cannot update indirection table"); } else { - rss_key = rss_hash_default_key; - rss_key_len = MLX5_RSS_HASH_KEY_LEN; + mlx5_free(queue_old); + shared_rss->origin.queue = queue; + shared_rss->origin.queue_num = action_conf->queue_num; } - for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { - uint32_t hrxq_idx; - uint64_t hash_fields = mlx5_rss_hash_fields[i]; - int tunnel; - - for (tunnel = 0; tunnel < 2; tunnel++) { - hrxq_idx = __flow_dv_action_rss_hrxq_lookup - (shared_rss, hash_fields, tunnel); - MLX5_ASSERT(hrxq_idx); - ret = mlx5_hrxq_modify - (dev, hrxq_idx, - rss_key, rss_key_len, - hash_fields, - action_conf->queue, action_conf->queue_num); - if (ret) { - mlx5_free(queue); - return rte_flow_error_set - (error, rte_errno, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "cannot update hash queue"); - } - } - } - mlx5_free(shared_rss->queue); - shared_rss->queue = queue; - memcpy(shared_rss->queue, action_conf->queue, queue_size); - shared_rss->origin.queue = shared_rss->queue; - shared_rss->origin.queue_num = action_conf->queue_num; - return 0; + rte_spinlock_unlock(&shared_rss->action_rss_sl); + return ret; } /** @@ -11302,19 +11776,54 @@ static int flow_dv_action_update(struct rte_eth_dev *dev, struct rte_flow_shared_action *action, const void *action_conf, - struct rte_flow_error *error) + struct rte_flow_error *err) { - switch (action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - return __flow_dv_action_rss_update(dev, &action->rss, - action_conf, error); + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); + + switch (type) { + case MLX5_SHARED_ACTION_TYPE_RSS: + return __flow_dv_action_rss_update(dev, idx, action_conf, err); + default: + return rte_flow_error_set(err, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "action type update not supported"); + } +} + +static int +flow_dv_action_query(struct rte_eth_dev *dev, + const struct rte_flow_shared_action *action, void 
*data, + struct rte_flow_error *error) +{ + struct mlx5_age_param *age_param; + struct rte_flow_query_age *resp; + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); + + switch (type) { + case MLX5_SHARED_ACTION_TYPE_AGE: + age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params; + resp = data; + resp->aged = __atomic_load_n(&age_param->state, + __ATOMIC_RELAXED) == AGE_TMOUT ? + 1 : 0; + resp->sec_since_last_hit_valid = !resp->aged; + if (resp->sec_since_last_hit_valid) + resp->sec_since_last_hit = __atomic_load_n + (&age_param->sec_since_last_hit, __ATOMIC_RELAXED); + return 0; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "action type not supported"); + "action type query not supported"); } } + /** * Query a dv flow rule for its statistics via devx. * @@ -11391,30 +11900,33 @@ flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow, void *data, struct rte_flow_error *error) { struct rte_flow_query_age *resp = data; + struct mlx5_age_param *age_param; - if (flow->counter) { - struct mlx5_age_param *age_param = - flow_dv_counter_idx_get_age(dev, flow->counter); + if (flow->age) { + struct mlx5_aso_age_action *act = + flow_aso_age_get_by_idx(dev, flow->age); + + age_param = &act->age_params; + } else if (flow->counter) { + age_param = flow_dv_counter_idx_get_age(dev, flow->counter); if (!age_param || !age_param->timeout) return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot read age data"); - resp->aged = __atomic_load_n(&age_param->state, - __ATOMIC_RELAXED) == - AGE_TMOUT ? 1 : 0; - resp->sec_since_last_hit_valid = !resp->aged; - if (resp->sec_since_last_hit_valid) - resp->sec_since_last_hit = - __atomic_load_n(&age_param->sec_since_last_hit, - __ATOMIC_RELAXED); - return 0; - } - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "age data not available"); + } else { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "age data not available"); + } + resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) == + AGE_TMOUT ? 
1 : 0; + resp->sec_since_last_hit_valid = !resp->aged; + if (resp->sec_since_last_hit_valid) + resp->sec_since_last_hit = __atomic_load_n + (&age_param->sec_since_last_hit, __ATOMIC_RELAXED); + return 0; } /** @@ -11898,19 +12410,15 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev) .match_mask = (void *)&mask, }; void *actions[2] = { 0 }; - struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL; + struct mlx5_flow_tbl_resource *tbl = NULL; struct mlx5_devx_obj *dcs = NULL; void *matcher = NULL; void *flow = NULL; - int i, ret = -1; + int ret = -1; tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL); if (!tbl) goto err; - dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, - NULL, 0, 0, NULL); - if (!dest_tbl) - goto err; dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); if (!dcs) goto err; @@ -11918,10 +12426,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev) &actions[0]); if (ret) goto err; - ret = mlx5_flow_os_create_flow_action_dest_flow_tbl - (dest_tbl->obj, &actions[1]); - if (ret) - goto err; + actions[1] = priv->drop_queue.hrxq->action; dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf); ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj, &matcher); @@ -11950,17 +12455,12 @@ err: "support detection"); ret = 0; } - for (i = 0; i < 2; i++) { - if (actions[i]) - claim_zero(mlx5_flow_os_destroy_flow_action - (actions[i])); - } + if (actions[0]) + claim_zero(mlx5_flow_os_destroy_flow_action(actions[0])); if (matcher) claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher)); if (tbl) flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); - if (dest_tbl) - flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl); if (dcs) claim_zero(mlx5_devx_cmd_destroy(dcs)); return ret; @@ -12038,16 +12538,24 @@ flow_get_aged_flows(struct rte_eth_dev *dev, struct mlx5_age_info *age_info; struct mlx5_age_param *age_param; struct mlx5_flow_counter *counter; + struct mlx5_aso_age_action *act; int nb_flows = 0; if (nb_contexts && !context) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "Should assign at least one flow or" - " context to get if nb_contexts != 0"); + NULL, "empty context"); age_info = GET_PORT_AGE_INFO(priv); rte_spinlock_lock(&age_info->aged_sl); + LIST_FOREACH(act, &age_info->aged_aso, next) { + nb_flows++; + if (nb_contexts) { + context[nb_flows - 1] = + act->age_params.context; + if (!(--nb_contexts)) + break; + } + } TAILQ_FOREACH(counter, &age_info->aged_counters, next) { nb_flows++; if (nb_contexts) { @@ -12092,14 +12600,23 @@ static int flow_dv_action_validate(struct rte_eth_dev *dev, const struct rte_flow_shared_action_conf *conf, const struct rte_flow_action *action, - struct rte_flow_error *error) + struct rte_flow_error *err) { + struct mlx5_priv *priv = dev->data->dev_private; + RTE_SET_USED(conf); switch (action->type) { case RTE_FLOW_ACTION_TYPE_RSS: - return mlx5_validate_action_rss(dev, action, error); + return mlx5_validate_action_rss(dev, action, err); + case RTE_FLOW_ACTION_TYPE_AGE: + if (!priv->sh->aso_age_mng) + return rte_flow_error_set(err, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "shared age action not supported"); + return flow_dv_validate_action_age(0, action, dev, err); default: - return rte_flow_error_set(error, ENOTSUP, + return rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); @@ -12151,6 +12668,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { 
.action_create = flow_dv_action_create, .action_destroy = flow_dv_action_destroy, .action_update = flow_dv_action_update, + .action_query = flow_dv_action_query, .sync_domain = flow_dv_sync_domain, };
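A closing note on the shared action handles used throughout the hunks above: flow_dv_action_create() no longer returns a heap pointer. It packs the action type above MLX5_SHARED_ACTION_TYPE_OFFSET and the ipool (or ASO age) index below it, then casts the combined value to struct rte_flow_shared_action *; flow_dv_action_destroy(), _update() and _query() recover both fields by shifting and masking, which is also why __flow_dv_action_rss_create() rejects an ipool index above 1u << MLX5_SHARED_ACTION_TYPE_OFFSET as out of range. A minimal standalone sketch, with a hypothetical offset value and helper names (the driver defines the real MLX5_SHARED_ACTION_TYPE_OFFSET):

#include <stdint.h>

/* Hypothetical stand-ins for the driver's definitions. */
enum shared_action_type { SHARED_ACTION_TYPE_RSS, SHARED_ACTION_TYPE_AGE };
#define SHARED_ACTION_TYPE_OFFSET 29 /* assumed bit position of the type */

/* Encode an action type and a pool index into a pointer-sized handle. */
static inline void *
shared_action_handle_encode(enum shared_action_type type, uint32_t idx)
{
	uint32_t v = ((uint32_t)type << SHARED_ACTION_TYPE_OFFSET) | idx;

	/* The handle is never dereferenced; it is just a tagged integer. */
	return (void *)(uintptr_t)v;
}

/* Decode the action type and pool index back out of the handle. */
static inline void
shared_action_handle_decode(const void *handle, uint32_t *type, uint32_t *idx)
{
	uint32_t v = (uint32_t)(uintptr_t)handle;

	*type = v >> SHARED_ACTION_TYPE_OFFSET;
	*idx = v & ((1u << SHARED_ACTION_TYPE_OFFSET) - 1);
}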