X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=b8d03d4df5a0f8b5a22ad7275825cd61f55b7f10;hb=2823b082f93c94c5c97fa572b5b84b637e088668;hp=467d1ce6b1e7b23b741bd0ec262840308f9d2cb2;hpb=38ad946892830c9fb5f8fe221bf677ee8e9bc819;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 467d1ce6b1..b8d03d4df5 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -54,8 +54,6 @@ #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1 #endif -#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \ - sizeof(struct rte_flow_item_ipv4)) /* VLAN header definitions */ #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) @@ -85,19 +83,76 @@ union flow_dv_attr { * Pointer to item specification. * @param[out] attr * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. */ static void -flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr) +flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, + struct mlx5_flow *dev_flow, bool tunnel_decap) { + uint64_t layers = dev_flow->handle->layers; + + /* + * If layers is already initialized, it means this dev_flow is the + * suffix flow, the layers flags is set by the prefix flow. Need to + * use the layer flags from prefix flow as the suffix flow may not + * have the user defined items as the flow is split. + */ + if (layers) { + if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) + attr->ipv4 = 1; + else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) + attr->ipv6 = 1; + if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP) + attr->tcp = 1; + else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP) + attr->udp = 1; + attr->valid = 1; + return; + } for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + uint8_t next_protocol = 0xff; switch (item->type) { + case RTE_FLOW_ITEM_TYPE_GRE: + case RTE_FLOW_ITEM_TYPE_NVGRE: + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + case RTE_FLOW_ITEM_TYPE_GENEVE: + case RTE_FLOW_ITEM_TYPE_MPLS: + if (tunnel_decap) + attr->attr = 0; + break; case RTE_FLOW_ITEM_TYPE_IPV4: if (!attr->ipv6) attr->ipv4 = 1; + if (item->mask != NULL && + ((const struct rte_flow_item_ipv4 *) + item->mask)->hdr.next_proto_id) + next_protocol = + ((const struct rte_flow_item_ipv4 *) + (item->spec))->hdr.next_proto_id & + ((const struct rte_flow_item_ipv4 *) + (item->mask))->hdr.next_proto_id; + if ((next_protocol == IPPROTO_IPIP || + next_protocol == IPPROTO_IPV6) && tunnel_decap) + attr->attr = 0; break; case RTE_FLOW_ITEM_TYPE_IPV6: if (!attr->ipv4) attr->ipv6 = 1; + if (item->mask != NULL && + ((const struct rte_flow_item_ipv6 *) + item->mask)->hdr.proto) + next_protocol = + ((const struct rte_flow_item_ipv6 *) + (item->spec))->hdr.proto & + ((const struct rte_flow_item_ipv6 *) + (item->mask))->hdr.proto; + if ((next_protocol == IPPROTO_IPIP || + next_protocol == IPPROTO_IPV6) && tunnel_decap) + attr->attr = 0; break; case RTE_FLOW_ITEM_TYPE_UDP: if (!attr->tcp) @@ -573,7 +628,7 @@ flow_dv_convert_action_modify_vlan_vid const struct rte_flow_action_of_set_vlan_vid *conf = (const struct rte_flow_action_of_set_vlan_vid *)(action->conf); int i = resource->actions_num; - struct mlx5_modification_cmd *actions = &resource->actions[i]; + struct mlx5_modification_cmd *actions = resource->actions; struct field_modify_info *field = 
modify_vlan_out_first_vid; if (i >= MLX5_MAX_MODIFY_NUM) @@ -604,6 +659,10 @@ flow_dv_convert_action_modify_vlan_vid * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. * @param[out] error * Pointer to the error structure. * @@ -615,8 +674,8 @@ flow_dv_convert_action_modify_tp (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_action *action, const struct rte_flow_item *items, - union flow_dv_attr *attr, - struct rte_flow_error *error) + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) { const struct rte_flow_action_set_tp *conf = (const struct rte_flow_action_set_tp *)(action->conf); @@ -628,7 +687,7 @@ flow_dv_convert_action_modify_tp struct field_modify_info *field; if (!attr->valid) - flow_dv_attr_init(items, attr); + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); if (attr->udp) { memset(&udp, 0, sizeof(udp)); memset(&udp_mask, 0, sizeof(udp_mask)); @@ -645,8 +704,8 @@ flow_dv_convert_action_modify_tp item.spec = &udp; item.mask = &udp_mask; field = modify_udp; - } - if (attr->tcp) { + } else { + MLX5_ASSERT(attr->tcp); memset(&tcp, 0, sizeof(tcp)); memset(&tcp_mask, 0, sizeof(tcp_mask)); if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { @@ -678,6 +737,10 @@ flow_dv_convert_action_modify_tp * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. * @param[out] error * Pointer to the error structure. * @@ -689,8 +752,8 @@ flow_dv_convert_action_modify_ttl (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_action *action, const struct rte_flow_item *items, - union flow_dv_attr *attr, - struct rte_flow_error *error) + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) { const struct rte_flow_action_set_ttl *conf = (const struct rte_flow_action_set_ttl *)(action->conf); @@ -702,7 +765,7 @@ flow_dv_convert_action_modify_ttl struct field_modify_info *field; if (!attr->valid) - flow_dv_attr_init(items, attr); + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); if (attr->ipv4) { memset(&ipv4, 0, sizeof(ipv4)); memset(&ipv4_mask, 0, sizeof(ipv4_mask)); @@ -712,8 +775,8 @@ flow_dv_convert_action_modify_ttl item.spec = &ipv4; item.mask = &ipv4_mask; field = modify_ipv4; - } - if (attr->ipv6) { + } else { + MLX5_ASSERT(attr->ipv6); memset(&ipv6, 0, sizeof(ipv6)); memset(&ipv6_mask, 0, sizeof(ipv6_mask)); ipv6.hdr.hop_limits = conf->ttl_value; @@ -738,6 +801,10 @@ flow_dv_convert_action_modify_ttl * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. * @param[out] error * Pointer to the error structure. 
* @@ -748,8 +815,8 @@ static int flow_dv_convert_action_modify_dec_ttl (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_item *items, - union flow_dv_attr *attr, - struct rte_flow_error *error) + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) { struct rte_flow_item item; struct rte_flow_item_ipv4 ipv4; @@ -759,7 +826,7 @@ flow_dv_convert_action_modify_dec_ttl struct field_modify_info *field; if (!attr->valid) - flow_dv_attr_init(items, attr); + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); if (attr->ipv4) { memset(&ipv4, 0, sizeof(ipv4)); memset(&ipv4_mask, 0, sizeof(ipv4_mask)); @@ -769,8 +836,8 @@ flow_dv_convert_action_modify_dec_ttl item.spec = &ipv4; item.mask = &ipv4_mask; field = modify_ipv4; - } - if (attr->ipv6) { + } else { + MLX5_ASSERT(attr->ipv6); memset(&ipv6, 0, sizeof(ipv6)); memset(&ipv6_mask, 0, sizeof(ipv6_mask)); ipv6.hdr.hop_limits = 0xFF; @@ -1341,6 +1408,11 @@ flow_dv_validate_item_mark(struct rte_eth_dev *dev, "mark id exceeds the limit"); if (!mask) mask = &nic_mask; + if (!mask->id) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_mark), @@ -1386,10 +1458,6 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, item->spec, "data cannot be empty"); - if (!spec->data) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, - "data cannot be zero"); if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { if (!mlx5_flow_ext_mreg_supported(dev)) return rte_flow_error_set(error, ENOTSUP, @@ -1409,6 +1477,11 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, } if (!mask) mask = &rte_flow_item_meta_mask; + if (!mask->data) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_meta), @@ -1457,6 +1530,11 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, "data cannot be empty"); if (!mask) mask = &rte_flow_item_tag_mask; + if (!mask->data) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_tag), @@ -1633,7 +1711,7 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; + const struct mlx5_priv *priv = dev->data->dev_private; (void)action; (void)attr; @@ -1664,6 +1742,11 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, port_id should " "be after pop VLAN action"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "pop vlan action for VF representor " + "not supported on NIC table"); return 0; } @@ -1690,10 +1773,14 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, if (items == NULL) return; - for (; items->type != RTE_FLOW_ITEM_TYPE_END && - items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++) - ; - if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) { + for (; items->type != RTE_FLOW_ITEM_TYPE_END; 
items++) { + int type = items->type; + + if (type == RTE_FLOW_ITEM_TYPE_VLAN || + type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN) + break; + } + if (items->type != RTE_FLOW_ITEM_TYPE_END) { const struct rte_flow_item_vlan *vlan_m = items->mask; const struct rte_flow_item_vlan *vlan_v = items->spec; @@ -1702,7 +1789,7 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, /* Only full match values are accepted */ if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) == MLX5DV_FLOW_VLAN_PCP_MASK_BE) { - vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK; + vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK; vlan->vlan_tci |= rte_be_to_cpu_16(vlan_v->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE); @@ -1723,6 +1810,8 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, /** * Validate the push VLAN action. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] action_flags * Holds the actions detected until now. * @param[in] item_flags @@ -1738,13 +1827,15 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -flow_dv_validate_action_push_vlan(uint64_t action_flags, +flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, + uint64_t action_flags, uint64_t item_flags __rte_unused, const struct rte_flow_action *action, const struct rte_flow_attr *attr, struct rte_flow_error *error) { const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; + const struct mlx5_priv *priv = dev->data->dev_private; if (!attr->transfer && attr->ingress) return rte_flow_error_set(error, ENOTSUP, @@ -1767,6 +1858,11 @@ flow_dv_validate_action_push_vlan(uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, port_id should " "be after push VLAN"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "push vlan action for VF representor " + "not supported on NIC table"); (void)attr; return 0; } @@ -2040,10 +2136,6 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "meta data must be within reg C0"); - if (!(conf->data & conf->mask)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, action, - "zero value has no effect"); return 0; } @@ -2136,10 +2228,14 @@ notsup_err: /** * Validate the L2 encap action. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] action_flags * Holds the actions detected until now. * @param[in] action * Pointer to the action structure. + * @param[in] attr + * Pointer to flow attributes. * @param[out] error * Pointer to error structure. * @@ -2147,10 +2243,14 @@ notsup_err: * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -flow_dv_validate_action_l2_encap(uint64_t action_flags, +flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, + uint64_t action_flags, const struct rte_flow_action *action, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { + const struct mlx5_priv *priv = dev->data->dev_private; + if (!(action->conf)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, @@ -2160,12 +2260,19 @@ flow_dv_validate_action_l2_encap(uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can only have a single encap action " "in a flow"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "encap action for VF representor " + "not supported on NIC table"); return 0; } /** * Validate a decap action. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] action_flags * Holds the actions detected until now. * @param[in] attr @@ -2177,10 +2284,13 @@ flow_dv_validate_action_l2_encap(uint64_t action_flags, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -flow_dv_validate_action_decap(uint64_t action_flags, - const struct rte_flow_attr *attr, - struct rte_flow_error *error) +flow_dv_validate_action_decap(struct rte_eth_dev *dev, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) { + const struct mlx5_priv *priv = dev->data->dev_private; + if (action_flags & MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -2199,6 +2309,11 @@ flow_dv_validate_action_decap(uint64_t action_flags, NULL, "decap action not supported for " "egress"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "decap action for VF representor " + "not supported on NIC table"); return 0; } @@ -2207,6 +2322,8 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; /** * Validate the raw encap and decap actions. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] decap * Pointer to the decap action. 
* @param[in] encap @@ -2225,11 +2342,13 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; */ static int flow_dv_validate_action_raw_encap_decap - (const struct rte_flow_action_raw_decap *decap, + (struct rte_eth_dev *dev, + const struct rte_flow_action_raw_decap *decap, const struct rte_flow_action_raw_encap *encap, const struct rte_flow_attr *attr, uint64_t *action_flags, int *actions_n, struct rte_flow_error *error) { + const struct mlx5_priv *priv = dev->data->dev_private; int ret; if (encap && (!encap->size || !encap->data)) @@ -2262,7 +2381,8 @@ flow_dv_validate_action_raw_encap_decap "encap combination"); } if (decap) { - ret = flow_dv_validate_action_decap(*action_flags, attr, error); + ret = flow_dv_validate_action_decap(dev, *action_flags, attr, + error); if (ret < 0) return ret; *action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -2279,6 +2399,12 @@ flow_dv_validate_action_raw_encap_decap RTE_FLOW_ERROR_TYPE_ACTION, NULL, "more than one encap action"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "encap action for VF representor " + "not supported on NIC table"); *action_flags |= MLX5_FLOW_ACTION_ENCAP; ++(*actions_n); } @@ -2312,7 +2438,7 @@ flow_dv_encap_decap_resource_register struct mlx5_flow_dv_encap_decap_resource *cache_resource; struct mlx5dv_dr_domain *domain; - resource->flags = dev_flow->group ? 0 : 1; + resource->flags = dev_flow->dv.group ? 0 : 1; if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) domain = sh->fdb_domain; else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) @@ -2332,7 +2458,7 @@ flow_dv_encap_decap_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.encap_decap = cache_resource; + dev_flow->handle->dvh.encap_decap = cache_resource; return 0; } } @@ -2358,7 +2484,7 @@ flow_dv_encap_decap_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next); - dev_flow->dv.encap_decap = cache_resource; + dev_flow->handle->dvh.encap_decap = cache_resource; DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -2409,7 +2535,7 @@ flow_dv_jump_tbl_resource_register (void *)&tbl_data->jump, cnt); } rte_atomic32_inc(&tbl_data->jump.refcnt); - dev_flow->dv.jump = &tbl_data->jump; + dev_flow->handle->dvh.jump = &tbl_data->jump; return 0; } @@ -2447,7 +2573,7 @@ flow_dv_port_id_action_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.port_id_action = cache_resource; + dev_flow->handle->dvh.port_id_action = cache_resource; return 0; } } @@ -2475,7 +2601,7 @@ flow_dv_port_id_action_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next); - dev_flow->dv.port_id_action = cache_resource; + dev_flow->handle->dvh.port_id_action = cache_resource; DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -2518,7 +2644,7 @@ flow_dv_push_vlan_action_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.push_vlan_res = 
cache_resource; + dev_flow->handle->dvh.push_vlan_res = cache_resource; return 0; } } @@ -2547,7 +2673,7 @@ flow_dv_push_vlan_action_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next); - dev_flow->dv.push_vlan_res = cache_resource; + dev_flow->handle->dvh.push_vlan_res = cache_resource; DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -3096,10 +3222,14 @@ flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; + uint64_t layer; ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (!ret) { - if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4)) + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + if (!(item_flags & layer)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -3130,10 +3260,14 @@ flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; + uint64_t layer; ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (!ret) { - if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6)) + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + if (!(item_flags & layer)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -3164,10 +3298,14 @@ flow_dv_validate_action_modify_tp(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; + uint64_t layer; ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (!ret) { - if (!(item_flags & MLX5_FLOW_LAYER_L4)) + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + if (!(item_flags & layer)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no transport layer " @@ -3199,10 +3337,14 @@ flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; + uint64_t layer; ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (!ret) { - if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)) + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + if (!(item_flags & layer)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no TCP item in" @@ -3244,10 +3386,14 @@ flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; + uint64_t layer; ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (!ret) { - if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)) + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + if (!(item_flags & layer)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no TCP item in" @@ -3288,10 +3434,14 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags, struct rte_flow_error *error) { int ret = 0; + uint64_t layer; ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (!ret) { - if (!(item_flags & MLX5_FLOW_LAYER_L3)) + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 
+ MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + if (!(item_flags & layer)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -3610,8 +3760,8 @@ flow_dv_modify_hdr_resource_register struct mlx5dv_dr_domain *ns; uint32_t actions_len; - resource->flags = - dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; + resource->flags = dev_flow->dv.group ? 0 : + MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, resource->flags)) return rte_flow_error_set(error, EOVERFLOW, @@ -3636,7 +3786,7 @@ flow_dv_modify_hdr_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.modify_hdr = cache_resource; + dev_flow->handle->dvh.modify_hdr = cache_resource; return 0; } } @@ -3663,139 +3813,48 @@ flow_dv_modify_hdr_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); - dev_flow->dv.modify_hdr = cache_resource; + dev_flow->handle->dvh.modify_hdr = cache_resource; DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); return 0; } -#define MLX5_CNT_CONTAINER_RESIZE 64 - /** - * Get or create a flow counter. + * Get DV flow counter by index. * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] shared - * Indicate if this counter is shared with other flows. - * @param[in] id - * Counter identifier. + * @param[in] idx + * mlx5 flow counter index in the container. + * @param[out] ppool + * mlx5 flow counter pool in the container, * * @return - * pointer to flow counter on success, NULL otherwise and rte_errno is set. + * Pointer to the counter, NULL otherwise. */ static struct mlx5_flow_counter * -flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared, - uint32_t id) +flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, + uint32_t idx, + struct mlx5_flow_counter_pool **ppool) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_counter *cnt = NULL; - struct mlx5_devx_obj *dcs = NULL; - - if (!priv->config.devx) { - rte_errno = ENOTSUP; - return NULL; - } - if (shared) { - TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) { - if (cnt->shared && cnt->id == id) { - cnt->ref_cnt++; - return cnt; - } - } - } - dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); - if (!dcs) - return NULL; - cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0); - if (!cnt) { - claim_zero(mlx5_devx_cmd_destroy(cnt->dcs)); - rte_errno = ENOMEM; - return NULL; - } - struct mlx5_flow_counter tmpl = { - .shared = shared, - .ref_cnt = 1, - .id = id, - .dcs = dcs, - }; - tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0); - if (!tmpl.action) { - claim_zero(mlx5_devx_cmd_destroy(cnt->dcs)); - rte_errno = errno; - rte_free(cnt); - return NULL; - } - *cnt = tmpl; - TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next); - return cnt; -} - -/** - * Release a flow counter. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] counter - * Pointer to the counter handler. 
- */ -static void -flow_dv_counter_release_fallback(struct rte_eth_dev *dev, - struct mlx5_flow_counter *counter) -{ - struct mlx5_priv *priv = dev->data->dev_private; - - if (!counter) - return; - if (--counter->ref_cnt == 0) { - TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next); - claim_zero(mlx5_devx_cmd_destroy(counter->dcs)); - rte_free(counter); - } -} - -/** - * Query a devx flow counter. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] cnt - * Pointer to the flow counter. - * @param[out] pkts - * The statistics value of packets. - * @param[out] bytes - * The statistics value of bytes. - * - * @return - * 0 on success, otherwise a negative errno value and rte_errno is set. - */ -static inline int -_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused, - struct mlx5_flow_counter *cnt, uint64_t *pkts, - uint64_t *bytes) -{ - return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes, - 0, NULL, NULL, 0); -} + struct mlx5_pools_container *cont; + struct mlx5_flow_counter_pool *pool; + uint32_t batch = 0; -/** - * Get a pool by a counter. - * - * @param[in] cnt - * Pointer to the counter. - * - * @return - * The counter pool. - */ -static struct mlx5_flow_counter_pool * -flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt) -{ - if (!cnt->batch) { - cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL; - return (struct mlx5_flow_counter_pool *)cnt - 1; + idx--; + if (idx >= MLX5_CNT_BATCH_OFFSET) { + idx -= MLX5_CNT_BATCH_OFFSET; + batch = 1; } - return cnt->pool; + cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0); + MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n); + pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL]; + MLX5_ASSERT(pool); + if (ppool) + *ppool = pool; + return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL]; } /** @@ -3812,15 +3871,26 @@ flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt) static struct mlx5_flow_counter_pool * flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id) { - struct mlx5_flow_counter_pool *pool; + uint32_t i; + uint32_t n_valid = rte_atomic16_read(&cont->n_valid); - TAILQ_FOREACH(pool, &cont->pool_list, next) { + for (i = 0; i < n_valid; i++) { + struct mlx5_flow_counter_pool *pool = cont->pools[i]; int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * - MLX5_COUNTERS_PER_POOL; + MLX5_COUNTERS_PER_POOL; - if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) + if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) { + /* + * Move the pool to the head, as counter allocate + * always gets the first pool in the container. 
+ */ + if (pool != TAILQ_FIRST(&cont->pool_list)) { + TAILQ_REMOVE(&cont->pool_list, pool, next); + TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); + } return pool; - }; + } + } return NULL; } @@ -3873,6 +3943,7 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) mkey_attr.pg_access = 0; mkey_attr.klm_array = NULL; mkey_attr.klm_num = 0; + mkey_attr.relaxed_ordering = 1; mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); if (!mem_mng->dm) { mlx5_glue->devx_umem_dereg(mem_mng->umem); @@ -3909,12 +3980,14 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch) MLX5_CNT_CONTAINER(priv->sh, batch, 0); struct mlx5_pools_container *new_cont = MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0); - struct mlx5_counter_stats_mem_mng *mem_mng; + struct mlx5_counter_stats_mem_mng *mem_mng = NULL; uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE; uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; int i; - if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) { + /* Fallback mode has no background thread. Skip the check. */ + if (!priv->counter_fallback && + cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) { /* The last resize still hasn't detected by the host thread. */ rte_errno = EAGAIN; return NULL; @@ -3927,16 +4000,29 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch) if (cont->n) memcpy(new_cont->pools, cont->pools, cont->n * sizeof(struct mlx5_flow_counter_pool *)); - mem_mng = flow_dv_create_counter_stat_mem_mng(dev, - MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); - if (!mem_mng) { - rte_free(new_cont->pools); - return NULL; + /* + * Fallback mode query the counter directly, no background query + * resources are needed. + */ + if (!priv->counter_fallback) { + mem_mng = flow_dv_create_counter_stat_mem_mng(dev, + MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); + if (!mem_mng) { + rte_free(new_cont->pools); + return NULL; + } + for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) + LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws, + mem_mng->raws + + MLX5_CNT_CONTAINER_RESIZE + + i, next); + } else { + /* + * Release the old container pools directly as no background + * thread helps that. + */ + rte_free(cont->pools); } - for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) - LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws, - mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + - i, next); new_cont->n = resize; rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid)); TAILQ_INIT(&new_cont->pool_list); @@ -3954,7 +4040,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch) * @param[in] dev * Pointer to the Ethernet device structure. * @param[in] cnt - * Pointer to the flow counter. + * Index to the flow counter. * @param[out] pkts * The statistics value of packets. * @param[out] bytes @@ -3964,17 +4050,23 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch) * 0 on success, otherwise a negative errno value and rte_errno is set. 
*/ static inline int -_flow_dv_query_count(struct rte_eth_dev *dev, - struct mlx5_flow_counter *cnt, uint64_t *pkts, +_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, uint64_t *bytes) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_counter_pool *pool = - flow_dv_counter_pool_get(cnt); - int offset = cnt - &pool->counters_raw[0]; + struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_ext *cnt_ext = NULL; + int offset; - if (priv->counter_fallback) - return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes); + cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); + MLX5_ASSERT(pool); + if (counter < MLX5_CNT_BATCH_OFFSET) { + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + if (priv->counter_fallback) + return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0, + 0, pkts, bytes, 0, NULL, NULL, 0); + } rte_spinlock_lock(&pool->sl); /* @@ -3982,10 +4074,11 @@ _flow_dv_query_count(struct rte_eth_dev *dev, * current allocated in parallel to the host reading. * In this case the new counter values must be reported as 0. */ - if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) { + if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) { *pkts = 0; *bytes = 0; } else { + offset = cnt - &pool->counters_raw[0]; *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits); *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes); } @@ -4002,11 +4095,13 @@ _flow_dv_query_count(struct rte_eth_dev *dev, * The devX counter handle. * @param[in] batch * Whether the pool is for counter that was allocated by batch command. + * @param[in/out] cont_cur + * Pointer to the container pointer, it will be update in pool resize. * * @return - * A new pool pointer on success, NULL otherwise and rte_errno is set. + * The pool container pointer on success, NULL otherwise and rte_errno is set. */ -static struct mlx5_flow_counter_pool * +static struct mlx5_pools_container * flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, uint32_t batch) { @@ -4022,30 +4117,44 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, if (!cont) return NULL; } - size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL * - sizeof(struct mlx5_flow_counter); + size = sizeof(*pool); + if (!batch) + size += MLX5_COUNTERS_PER_POOL * + sizeof(struct mlx5_flow_counter_ext); pool = rte_calloc(__func__, 1, size, 0); if (!pool) { rte_errno = ENOMEM; return NULL; } pool->min_dcs = dcs; - pool->raw = cont->init_mem_mng->raws + n_valid % + if (!priv->counter_fallback) + pool->raw = cont->init_mem_mng->raws + n_valid % MLX5_CNT_CONTAINER_RESIZE; pool->raw_hw = NULL; rte_spinlock_init(&pool->sl); /* * The generation of the new allocated counters in this pool is 0, 2 in * the pool generation makes all the counters valid for allocation. + * The start and end query generation protect the counters be released + * between the query and update gap period will not be reallocated + * without the last query finished and stats updated to the memory. */ - rte_atomic64_set(&pool->query_gen, 0x2); + rte_atomic64_set(&pool->start_query_gen, 0x2); + /* + * There's no background query thread for fallback mode, set the + * end_query_gen to the maximum value since no need to wait for + * statistics update. + */ + rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ? 
+ INT64_MAX : 0x2); TAILQ_INIT(&pool->counters); - TAILQ_INSERT_TAIL(&cont->pool_list, pool, next); + TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); + pool->index = n_valid; cont->pools[n_valid] = pool; /* Pool initialization must be updated before host thread access. */ rte_cio_wmb(); rte_atomic16_add(&cont->n_valid, 1); - return pool; + return cont; } /** @@ -4059,42 +4168,45 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, * Whether the pool is for counter that was allocated by batch command. * * @return - * The free counter pool pointer and @p cnt_free is set on success, + * The counter container pointer and @p cnt_free is set on success, * NULL otherwise and rte_errno is set. */ -static struct mlx5_flow_counter_pool * +static struct mlx5_pools_container * flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, struct mlx5_flow_counter **cnt_free, uint32_t batch) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_pools_container *cont; struct mlx5_flow_counter_pool *pool; struct mlx5_devx_obj *dcs = NULL; struct mlx5_flow_counter *cnt; uint32_t i; + cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0); if (!batch) { /* bulk_bitmap must be 0 for single counter allocation. */ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); if (!dcs) return NULL; - pool = flow_dv_find_pool_by_id - (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id); + pool = flow_dv_find_pool_by_id(cont, dcs->id); if (!pool) { - pool = flow_dv_pool_create(dev, dcs, batch); - if (!pool) { + cont = flow_dv_pool_create(dev, dcs, batch); + if (!cont) { mlx5_devx_cmd_destroy(dcs); return NULL; } + pool = TAILQ_FIRST(&cont->pool_list); } else if (dcs->id < pool->min_dcs->id) { rte_atomic64_set(&pool->a64_dcs, (int64_t)(uintptr_t)dcs); } - cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL]; + i = dcs->id % MLX5_COUNTERS_PER_POOL; + cnt = &pool->counters_raw[i]; TAILQ_INSERT_HEAD(&pool->counters, cnt, next); - cnt->dcs = dcs; + MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs; *cnt_free = cnt; - return pool; + return cont; } /* bulk_bitmap is in 128 counters units. */ if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) @@ -4103,18 +4215,18 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, rte_errno = ENODATA; return NULL; } - pool = flow_dv_pool_create(dev, dcs, batch); - if (!pool) { + cont = flow_dv_pool_create(dev, dcs, batch); + if (!cont) { mlx5_devx_cmd_destroy(dcs); return NULL; } + pool = TAILQ_FIRST(&cont->pool_list); for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { cnt = &pool->counters_raw[i]; - cnt->pool = pool; TAILQ_INSERT_HEAD(&pool->counters, cnt, next); } *cnt_free = &pool->counters_raw[0]; - return pool; + return cont; } /** @@ -4124,23 +4236,30 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, * Pointer to the relevant counter pool container. * @param[in] id * The shared counter ID to search. + * @param[out] ppool + * mlx5 flow counter pool in the container, * * @return - * NULL if not existed, otherwise pointer to the shared counter. + * NULL if not existed, otherwise pointer to the shared extend counter. 
*/ -static struct mlx5_flow_counter * -flow_dv_counter_shared_search(struct mlx5_pools_container *cont, - uint32_t id) +static struct mlx5_flow_counter_ext * +flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id, + struct mlx5_flow_counter_pool **ppool) { - static struct mlx5_flow_counter *cnt; + static struct mlx5_flow_counter_ext *cnt; struct mlx5_flow_counter_pool *pool; - int i; + uint32_t i; + uint32_t n_valid = rte_atomic16_read(&cont->n_valid); - TAILQ_FOREACH(pool, &cont->pool_list, next) { + for (i = 0; i < n_valid; i++) { + pool = cont->pools[i]; for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { - cnt = &pool->counters_raw[i]; - if (cnt->ref_cnt && cnt->shared && cnt->id == id) + cnt = MLX5_GET_POOL_CNT_EXT(pool, i); + if (cnt->ref_cnt && cnt->shared && cnt->id == id) { + if (ppool) + *ppool = cont->pools[i]; return cnt; + } } } return NULL; @@ -4159,15 +4278,16 @@ flow_dv_counter_shared_search(struct mlx5_pools_container *cont, * Counter flow group. * * @return - * pointer to flow counter on success, NULL otherwise and rte_errno is set. + * Index to flow counter on success, 0 otherwise and rte_errno is set. */ -static struct mlx5_flow_counter * +static uint32_t flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, uint16_t group) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; struct mlx5_flow_counter *cnt_free = NULL; + struct mlx5_flow_counter_ext *cnt_ext = NULL; /* * Currently group 0 flow counter cannot be assigned to a flow if it is * not the first one in the batch counter allocation, so it is better @@ -4176,25 +4296,27 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, * A counter can be shared between different groups so need to take * shared counters from the single container. */ - uint32_t batch = (group && !shared) ? 1 : 0; + uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0; struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0); + uint32_t cnt_idx; - if (priv->counter_fallback) - return flow_dv_counter_alloc_fallback(dev, shared, id); if (!priv->config.devx) { rte_errno = ENOTSUP; - return NULL; + return 0; } if (shared) { - cnt_free = flow_dv_counter_shared_search(cont, id); - if (cnt_free) { - if (cnt_free->ref_cnt + 1 == 0) { + cnt_ext = flow_dv_counter_shared_search(cont, id, &pool); + if (cnt_ext) { + if (cnt_ext->ref_cnt + 1 == 0) { rte_errno = E2BIG; - return NULL; + return 0; } - cnt_free->ref_cnt++; - return cnt_free; + cnt_ext->ref_cnt++; + cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL + + (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL) + + 1; + return cnt_idx; } } /* Pools which has a free counters are in the start. */ @@ -4209,17 +4331,19 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, * updated too. */ cnt_free = TAILQ_FIRST(&pool->counters); - if (cnt_free && cnt_free->query_gen + 1 < - rte_atomic64_read(&pool->query_gen)) + if (cnt_free && cnt_free->query_gen < + rte_atomic64_read(&pool->end_query_gen)) break; cnt_free = NULL; } if (!cnt_free) { - pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch); - if (!pool) - return NULL; + cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch); + if (!cont) + return 0; + pool = TAILQ_FIRST(&cont->pool_list); } - cnt_free->batch = batch; + if (!batch) + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free); /* Create a DV counter action only in the first time usage. 
*/ if (!cnt_free->action) { uint16_t offset; @@ -4230,23 +4354,28 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, dcs = pool->min_dcs; } else { offset = 0; - dcs = cnt_free->dcs; + dcs = cnt_ext->dcs; } cnt_free->action = mlx5_glue->dv_create_flow_action_counter (dcs->obj, offset); if (!cnt_free->action) { rte_errno = errno; - return NULL; + return 0; } } + cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, + (cnt_free - pool->counters_raw)); + cnt_idx += batch * MLX5_CNT_BATCH_OFFSET; /* Update the counter reset values. */ - if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits, + if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits, &cnt_free->bytes)) - return NULL; - cnt_free->shared = shared; - cnt_free->ref_cnt = 1; - cnt_free->id = id; - if (!priv->sh->cmng.query_thread_on) + return 0; + if (cnt_ext) { + cnt_ext->shared = shared; + cnt_ext->ref_cnt = 1; + cnt_ext->id = id; + } + if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on) /* Start the asynchronous batch query by the host thread. */ mlx5_set_query_alarm(priv->sh); TAILQ_REMOVE(&pool->counters, cnt_free, next); @@ -4255,7 +4384,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, TAILQ_REMOVE(&cont->pool_list, pool, next); TAILQ_INSERT_TAIL(&cont->pool_list, pool, next); } - return cnt_free; + return cnt_idx; } /** @@ -4264,28 +4393,33 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, * @param[in] dev * Pointer to the Ethernet device structure. * @param[in] counter - * Pointer to the counter handler. + * Index to the counter handler. */ static void -flow_dv_counter_release(struct rte_eth_dev *dev, - struct mlx5_flow_counter *counter) +flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) { - struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_ext *cnt_ext = NULL; if (!counter) return; - if (priv->counter_fallback) { - flow_dv_counter_release_fallback(dev, counter); - return; - } - if (--counter->ref_cnt == 0) { - struct mlx5_flow_counter_pool *pool = - flow_dv_counter_pool_get(counter); - - /* Put the counter in the end - the last updated one. */ - TAILQ_INSERT_TAIL(&pool->counters, counter, next); - counter->query_gen = rte_atomic64_read(&pool->query_gen); + cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); + MLX5_ASSERT(pool); + if (counter < MLX5_CNT_BATCH_OFFSET) { + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + if (cnt_ext && --cnt_ext->ref_cnt) + return; } + /* Put the counter in the end - the last updated one. */ + TAILQ_INSERT_TAIL(&pool->counters, cnt, next); + /* + * Counters released between query trigger and handler need + * to wait the next round of query. Since the packets arrive + * in the gap period will not be taken into account to the + * old counter. 
+ */ + cnt->query_gen = rte_atomic64_read(&pool->start_query_gen); } /** @@ -4397,13 +4531,35 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_action_raw_decap *decap; const struct rte_flow_action_raw_encap *encap; const struct rte_flow_action_rss *rss; - struct rte_flow_item_tcp nic_tcp_mask = { + const struct rte_flow_item_tcp nic_tcp_mask = { .hdr = { .tcp_flags = 0xFF, .src_port = RTE_BE16(UINT16_MAX), .dst_port = RTE_BE16(UINT16_MAX), } }; + const struct rte_flow_item_ipv4 nic_ipv4_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + .time_to_live = 0xff, + }, + }; + const struct rte_flow_item_ipv6 nic_ipv6_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + .hop_limits = 0xff, + }, + }; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; uint16_t queue_index = 0xFFFF; @@ -4470,7 +4626,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, &item_flags, &tunnel); ret = mlx5_flow_validate_item_ipv4(items, item_flags, last_item, - ether_type, NULL, + ether_type, + &nic_ipv4_mask, error); if (ret < 0) return ret; @@ -4495,7 +4652,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, &item_flags, &tunnel); ret = mlx5_flow_validate_item_ipv6(items, item_flags, last_item, - ether_type, NULL, + ether_type, + &nic_ipv6_mask, error); if (ret < 0) return ret; @@ -4775,7 +4933,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ++actions_n; break; case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: - ret = flow_dv_validate_action_push_vlan(action_flags, + ret = flow_dv_validate_action_push_vlan(dev, + action_flags, item_flags, actions, attr, error); @@ -4803,8 +4962,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: - ret = flow_dv_validate_action_l2_encap(action_flags, - actions, error); + ret = flow_dv_validate_action_l2_encap(dev, + action_flags, + actions, attr, + error); if (ret < 0) return ret; action_flags |= MLX5_FLOW_ACTION_ENCAP; @@ -4812,8 +4973,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: - ret = flow_dv_validate_action_decap(action_flags, attr, - error); + ret = flow_dv_validate_action_decap(dev, action_flags, + attr, error); if (ret < 0) return ret; action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -4821,7 +4982,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: ret = flow_dv_validate_action_raw_encap_decap - (NULL, actions->conf, attr, &action_flags, + (dev, NULL, actions->conf, attr, &action_flags, &actions_n, error); if (ret < 0) return ret; @@ -4837,7 +4998,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, encap = actions->conf; } ret = flow_dv_validate_action_raw_encap_decap - (decap ? decap : &empty_decap, encap, + (dev, + decap ? 
decap : &empty_decap, encap, attr, &action_flags, &actions_n, error); if (ret < 0) @@ -5092,6 +5254,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * Internal preparation function. Allocates the DV flow size, * this size is constant. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] attr * Pointer to the flow attributes. * @param[in] items @@ -5106,24 +5270,43 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * otherwise NULL and rte_errno is set. */ static struct mlx5_flow * -flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused, +flow_dv_prepare(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, struct rte_flow_error *error) { - size_t size = sizeof(struct mlx5_flow); + size_t size = sizeof(struct mlx5_flow_handle); struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; - dev_flow = rte_calloc(__func__, 1, size, 0); - if (!dev_flow) { + /* In case of corrupting the memory. */ + if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + rte_flow_error_set(error, ENOSPC, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "not free temporary device flow"); + return NULL; + } + dev_handle = rte_calloc(__func__, 1, size, 0); + if (!dev_handle) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "not enough memory to create flow"); + "not enough memory to create flow handle"); return NULL; } + /* No multi-thread supporting. */ + dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + dev_flow->handle = dev_handle; dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); + /* + * The matching value needs to be cleared to 0 before using. In the + * past, it will be automatically cleared when using rte_*alloc + * API. The time consumption will be almost the same as before. + */ + memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param)); dev_flow->ingress = attr->ingress; - dev_flow->transfer = attr->transfer; + dev_flow->dv.transfer = attr->transfer; return dev_flow; } @@ -5213,19 +5396,27 @@ flow_dv_translate_item_eth(void *matcher, void *key, /* The value must be in the range of the mask. */ for (i = 0; i < sizeof(eth_m->dst); ++i) l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, - rte_be_to_cpu_16(eth_m->type)); - l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype); - *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; if (eth_v->type) { /* When ethertype is present set mask for tagged VLAN. */ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); /* Set value for tagged VLAN if ethertype is 802.1Q. */ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) || - eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) + eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); + /* Return here to avoid setting match on ethertype. */ + return; + } } + /* + * HW supports match on one Ethertype, the Ethertype following the last + * VLAN tag of the packet (see PRM). + * Set match on ethertype only if ETH header is not followed by VLAN. 
+ */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, + rte_be_to_cpu_16(eth_m->type)); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype); + *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; } /** @@ -5271,7 +5462,7 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * This is workaround, masks are not supported, * and pre-validated. */ - dev_flow->dv.vf_vlan.tag = + dev_flow->handle->vf_vlan.tag = rte_be_to_cpu_16(vlan_v->tci) & 0x0fff; } tci_m = rte_be_to_cpu_16(vlan_m->tci); @@ -5299,6 +5490,8 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * Flow matcher value. * @param[in] item * Flow pattern to translate. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. * @param[in] inner * Item is inner pattern. * @param[in] group @@ -5307,6 +5500,7 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, static void flow_dv_translate_item_ipv4(void *matcher, void *key, const struct rte_flow_item *item, + const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv4 *ipv4_m = item->mask; @@ -5317,6 +5511,7 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, .dst_addr = RTE_BE32(0xffffffff), .type_of_service = 0xff, .next_proto_id = 0xff, + .time_to_live = 0xff, }, }; void *headers_m; @@ -5339,6 +5534,13 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, else MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4); + /* + * On outer header (which must contains L2), or inner header with L2, + * set cvlan_tag mask bit to mark this packet as untagged. + * This should be done even if item->spec is empty. + */ + if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) + MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv4_v) return; if (!ipv4_m) @@ -5366,7 +5568,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, ipv4_m->hdr.next_proto_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, + ipv4_m->hdr.time_to_live); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, + ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); } /** @@ -5378,6 +5583,8 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. * @param[in] inner * Item is inner pattern. * @param[in] group @@ -5386,6 +5593,7 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, static void flow_dv_translate_item_ipv6(void *matcher, void *key, const struct rte_flow_item *item, + const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv6 *ipv6_m = item->mask; @@ -5428,6 +5636,13 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, else MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6); + /* + * On outer header (which must contains L2), or inner header with L2, + * set cvlan_tag mask bit to mark this packet as untagged. + * This should be done even if item->spec is empty. 
+ */ + if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) + MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv6_v) return; if (!ipv6_m) @@ -5471,7 +5686,11 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, ipv6_m->hdr.proto); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, ipv6_v->hdr.proto & ipv6_m->hdr.proto); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); + /* Hop limit. */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, + ipv6_m->hdr.hop_limits); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, + ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); } /** @@ -6744,7 +6963,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, (void *)cache_matcher, rte_atomic32_read(&cache_matcher->refcnt)); rte_atomic32_inc(&cache_matcher->refcnt); - dev_flow->dv.matcher = cache_matcher; + dev_flow->handle->dvh.matcher = cache_matcher; /* old matcher should not make the table ref++. */ flow_dv_tbl_resource_release(dev, tbl); return 0; @@ -6781,7 +7000,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, /* only matcher ref++, table ref++ already done above in get API. */ rte_atomic32_inc(&cache_matcher->refcnt); LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); - dev_flow->dv.matcher = cache_matcher; + dev_flow->handle->dvh.matcher = cache_matcher; DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", key->domain ? "FDB" : "NIC", key->table_id, cache_matcher->priority, @@ -6823,7 +7042,7 @@ flow_dv_tag_resource_register cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.tag_resource = cache_resource; + dev_flow->handle->dvh.tag_resource = cache_resource; DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -6852,7 +7071,7 @@ flow_dv_tag_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot insert tag"); } - dev_flow->dv.tag_resource = cache_resource; + dev_flow->handle->dvh.tag_resource = cache_resource; DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -6997,7 +7216,7 @@ static void flow_dv_hashfields_set(struct mlx5_flow *dev_flow) { struct rte_flow *flow = dev_flow->flow; - uint64_t items = dev_flow->layers; + uint64_t items = dev_flow->handle->layers; int rss_inner = 0; uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types); @@ -7087,6 +7306,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; struct rte_flow *flow = dev_flow->flow; + struct mlx5_flow_handle *handle = dev_flow->handle; uint64_t item_flags = 0; uint64_t last_item = 0; uint64_t action_flags = 0; @@ -7122,7 +7342,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, !!priv->fdb_def_rule, &table, error); if (ret) return ret; - dev_flow->group = table; + dev_flow->dv.group = table; if (attr->transfer) mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; if (priority == MLX5_FLOW_PRIO_RSVD) @@ -7155,7 +7375,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, (dev, &port_id_resource, dev_flow, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.port_id_action->action; + handle->dvh.port_id_action->action; action_flags |= MLX5_FLOW_ACTION_PORT_ID; break; case RTE_FLOW_ACTION_TYPE_FLAG: @@ -7173,12 +7393,17 @@ __flow_dv_translate(struct rte_eth_dev *dev, break; } tag_be = 
mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); - if (!dev_flow->dv.tag_resource) - if (flow_dv_tag_resource_register - (dev, tag_be, dev_flow, error)) - return -rte_errno; + /* + * Only one FLAG or MARK is supported per device flow + * right now. So the pointer to the tag resource must be + * zero before the register process. + */ + MLX5_ASSERT(!handle->dvh.tag_resource); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; + handle->dvh.tag_resource->action; break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; @@ -7200,12 +7425,12 @@ __flow_dv_translate(struct rte_eth_dev *dev, tag_be = mlx5_flow_mark_set (((const struct rte_flow_action_mark *) (actions->conf))->id); - if (!dev_flow->dv.tag_resource) - if (flow_dv_tag_resource_register - (dev, tag_be, dev_flow, error)) - return -rte_errno; + MLX5_ASSERT(!handle->dvh.tag_resource); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; + handle->dvh.tag_resource->action; break; case RTE_FLOW_ACTION_TYPE_SET_META: if (flow_dv_convert_action_set_meta @@ -7255,13 +7480,14 @@ __flow_dv_translate(struct rte_eth_dev *dev, goto cnt_err; } flow->counter = flow_dv_counter_alloc(dev, - count->shared, - count->id, - dev_flow->group); - if (flow->counter == NULL) + count->shared, + count->id, + dev_flow->dv.group); + if (!flow->counter) goto cnt_err; dev_flow->dv.actions[actions_n++] = - flow->counter->action; + (flow_dv_counter_get_by_idx(dev, + flow->counter, NULL))->action; action_flags |= MLX5_FLOW_ACTION_COUNT; break; cnt_err: @@ -7303,7 +7529,7 @@ cnt_err: (dev, attr, &vlan, dev_flow, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.push_vlan_res->action; + handle->dvh.push_vlan_res->action; action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; break; case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: @@ -7330,7 +7556,7 @@ cnt_err: error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; action_flags |= MLX5_FLOW_ACTION_ENCAP; break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: @@ -7340,7 +7566,7 @@ cnt_err: error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; action_flags |= MLX5_FLOW_ACTION_DECAP; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: @@ -7350,7 +7576,7 @@ cnt_err: (dev, actions, dev_flow, attr, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; } else { /* Handle encap without preceding decap. */ if (flow_dv_create_action_l2_encap @@ -7358,7 +7584,7 @@ cnt_err: error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; } action_flags |= MLX5_FLOW_ACTION_ENCAP; break; @@ -7370,7 +7596,7 @@ cnt_err: (dev, dev_flow, attr->transfer, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; } /* If decap is followed by encap, handle it at encap. 
*/ action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -7402,7 +7628,7 @@ cnt_err: "cannot create jump action."); } dev_flow->dv.actions[actions_n++] = - dev_flow->dv.jump->action; + handle->dvh.jump->action; action_flags |= MLX5_FLOW_ACTION_JUMP; break; case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: @@ -7439,7 +7665,8 @@ cnt_err: case RTE_FLOW_ACTION_TYPE_SET_TP_DST: if (flow_dv_convert_action_modify_tp (mhdr_res, actions, items, - &flow_attr, error)) + &flow_attr, dev_flow, !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) return -rte_errno; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? @@ -7448,14 +7675,17 @@ cnt_err: break; case RTE_FLOW_ACTION_TYPE_DEC_TTL: if (flow_dv_convert_action_modify_dec_ttl - (mhdr_res, items, &flow_attr, error)) + (mhdr_res, items, &flow_attr, dev_flow, + !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) return -rte_errno; action_flags |= MLX5_FLOW_ACTION_DEC_TTL; break; case RTE_FLOW_ACTION_TYPE_SET_TTL: if (flow_dv_convert_action_modify_ttl - (mhdr_res, actions, items, - &flow_attr, error)) + (mhdr_res, actions, items, &flow_attr, + dev_flow, !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) return -rte_errno; action_flags |= MLX5_FLOW_ACTION_SET_TTL; break; @@ -7531,7 +7761,7 @@ cnt_err: (dev, mhdr_res, dev_flow, error)) return -rte_errno; dev_flow->dv.actions[modify_action_position] = - dev_flow->dv.modify_hdr->verbs_action; + handle->dvh.modify_hdr->verbs_action; } break; default: @@ -7542,7 +7772,7 @@ cnt_err: modify_action_position = actions_n++; } dev_flow->dv.actions_n = actions_n; - dev_flow->actions = action_flags; + handle->act_flags = action_flags; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int item_type = items->type; @@ -7574,8 +7804,8 @@ cnt_err: mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); flow_dv_translate_item_ipv4(match_mask, match_value, - items, tunnel, - dev_flow->group); + items, item_flags, tunnel, + dev_flow->dv.group); matcher.priority = MLX5_PRIORITY_MAP_L3; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : MLX5_FLOW_LAYER_OUTER_L3_IPV4; @@ -7597,8 +7827,8 @@ cnt_err: mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); flow_dv_translate_item_ipv6(match_mask, match_value, - items, tunnel, - dev_flow->group); + items, item_flags, tunnel, + dev_flow->dv.group); matcher.priority = MLX5_PRIORITY_MAP_L3; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : MLX5_FLOW_LAYER_OUTER_L3_IPV6; @@ -7743,7 +7973,11 @@ cnt_err: MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, dev_flow->dv.value.buf)); #endif - dev_flow->layers = item_flags; + /* + * Layers may be already initialized from prefix flow if this dev_flow + * is the suffix flow. + */ + handle->layers |= item_flags; if (action_flags & MLX5_FLOW_ACTION_RSS) flow_dv_hashfields_set(dev_flow); /* Register matcher. */ @@ -7754,7 +7988,7 @@ cnt_err: /* reserved field no needs to be set to 0 here. 
*/ tbl_key.domain = attr->transfer; tbl_key.direction = attr->egress; - tbl_key.table_id = dev_flow->group; + tbl_key.table_id = dev_flow->dv.group; if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error)) return -rte_errno; return 0; @@ -7778,21 +8012,27 @@ static int __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { - struct mlx5_flow_dv *dv; + struct mlx5_flow_dv_workspace *dv; + struct mlx5_flow_handle *dh; + struct mlx5_flow_handle_dv *dv_h; struct mlx5_flow *dev_flow; struct mlx5_priv *priv = dev->data->dev_private; int n; int err; + int idx; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { + for (idx = priv->flow_idx - 1; idx >= 0; idx--) { + dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; dv = &dev_flow->dv; + dh = dev_flow->handle; + dv_h = &dh->dvh; n = dv->actions_n; - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) { - if (dev_flow->transfer) { + if (dh->act_flags & MLX5_FLOW_ACTION_DROP) { + if (dv->transfer) { dv->actions[n++] = priv->sh->esw_drop_action; } else { - dv->hrxq = mlx5_hrxq_drop_new(dev); - if (!dv->hrxq) { + dh->hrxq = mlx5_hrxq_drop_new(dev); + if (!dh->hrxq) { rte_flow_error_set (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -7800,9 +8040,9 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get drop hash queue"); goto error; } - dv->actions[n++] = dv->hrxq->action; + dv->actions[n++] = dh->hrxq->action; } - } else if (dev_flow->actions & + } else if (dh->act_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) { struct mlx5_hrxq *hrxq; @@ -7819,7 +8059,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, dev_flow->hash_fields, (*flow->rss.queue), flow->rss.queue_num, - !!(dev_flow->layers & + !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL)); } if (!hrxq) { @@ -7829,14 +8069,14 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get hash queue"); goto error; } - dv->hrxq = hrxq; - dv->actions[n++] = dv->hrxq->action; + dh->hrxq = hrxq; + dv->actions[n++] = dh->hrxq->action; } - dv->flow = - mlx5_glue->dv_create_flow(dv->matcher->matcher_object, + dh->ib_flow = + mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object, (void *)&dv->value, n, dv->actions); - if (!dv->flow) { + if (!dh->ib_flow) { rte_flow_error_set(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -7844,32 +8084,29 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, goto error; } if (priv->vmwa_context && - dev_flow->dv.vf_vlan.tag && - !dev_flow->dv.vf_vlan.created) { + dh->vf_vlan.tag && !dh->vf_vlan.created) { /* * The rule contains the VLAN pattern. * For VF we are going to create VLAN * interface to make hypervisor set correct * e-Switch vport context. */ - mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan); + mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); } } return 0; error: err = rte_errno; /* Save rte_errno before cleanup. 
*/ - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - struct mlx5_flow_dv *dv = &dev_flow->dv; - if (dv->hrxq) { - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) + LIST_FOREACH(dh, &flow->dev_handles, next) { + if (dh->hrxq) { + if (dh->act_flags & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else - mlx5_hrxq_release(dev, dv->hrxq); - dv->hrxq = NULL; + mlx5_hrxq_release(dev, dh->hrxq); + dh->hrxq = NULL; } - if (dev_flow->dv.vf_vlan.tag && - dev_flow->dv.vf_vlan.created) - mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan); + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); } rte_errno = err; /* Restore rte_errno. */ return -rte_errno; @@ -7880,17 +8117,17 @@ error: * * @param dev * Pointer to Ethernet device. - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int flow_dv_matcher_release(struct rte_eth_dev *dev, - struct mlx5_flow *flow) + struct mlx5_flow_handle *handle) { - struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher; + struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher; MLX5_ASSERT(matcher->matcher_object); DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", @@ -7913,17 +8150,17 @@ flow_dv_matcher_release(struct rte_eth_dev *dev, /** * Release an encap/decap resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_encap_decap_resource_release(struct mlx5_flow *flow) +flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_encap_decap_resource *cache_resource = - flow->dv.encap_decap; + handle->dvh.encap_decap; MLX5_ASSERT(cache_resource->verbs_action); DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", @@ -7946,17 +8183,18 @@ flow_dv_encap_decap_resource_release(struct mlx5_flow *flow) * * @param dev * Pointer to Ethernet device. - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, - struct mlx5_flow *flow) + struct mlx5_flow_handle *handle) { - struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump; + struct mlx5_flow_dv_jump_tbl_resource *cache_resource = + handle->dvh.jump; struct mlx5_flow_tbl_data_entry *tbl_data = container_of(cache_resource, struct mlx5_flow_tbl_data_entry, jump); @@ -7980,17 +8218,17 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, /** * Release a modify-header resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow) +flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_modify_hdr_resource *cache_resource = - flow->dv.modify_hdr; + handle->dvh.modify_hdr; MLX5_ASSERT(cache_resource->verbs_action); DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", @@ -8011,17 +8249,17 @@ flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow) /** * Release port ID action resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. 
*/ static int -flow_dv_port_id_action_resource_release(struct mlx5_flow *flow) +flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_port_id_action_resource *cache_resource = - flow->dv.port_id_action; + handle->dvh.port_id_action; MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", @@ -8042,17 +8280,17 @@ flow_dv_port_id_action_resource_release(struct mlx5_flow *flow) /** * Release push vlan action resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow) +flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_push_vlan_action_resource *cache_resource = - flow->dv.push_vlan_res; + handle->dvh.push_vlan_res; MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--", @@ -8082,27 +8320,24 @@ flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow) static void __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_dv *dv; - struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dh; if (!flow) return; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - dv = &dev_flow->dv; - if (dv->flow) { - claim_zero(mlx5_glue->dv_destroy_flow(dv->flow)); - dv->flow = NULL; + LIST_FOREACH(dh, &flow->dev_handles, next) { + if (dh->ib_flow) { + claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow)); + dh->ib_flow = NULL; } - if (dv->hrxq) { - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) + if (dh->hrxq) { + if (dh->act_flags & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else - mlx5_hrxq_release(dev, dv->hrxq); - dv->hrxq = NULL; + mlx5_hrxq_release(dev, dh->hrxq); + dh->hrxq = NULL; } - if (dev_flow->dv.vf_vlan.tag && - dev_flow->dv.vf_vlan.created) - mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan); + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); } } @@ -8118,37 +8353,38 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) static void __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; if (!flow) return; __flow_dv_remove(dev, flow); if (flow->counter) { flow_dv_counter_release(dev, flow->counter); - flow->counter = NULL; + flow->counter = 0; } if (flow->meter) { mlx5_flow_meter_detach(flow->meter); flow->meter = NULL; } - while (!LIST_EMPTY(&flow->dev_flows)) { - dev_flow = LIST_FIRST(&flow->dev_flows); - LIST_REMOVE(dev_flow, next); - if (dev_flow->dv.matcher) - flow_dv_matcher_release(dev, dev_flow); - if (dev_flow->dv.encap_decap) - flow_dv_encap_decap_resource_release(dev_flow); - if (dev_flow->dv.modify_hdr) - flow_dv_modify_hdr_resource_release(dev_flow); - if (dev_flow->dv.jump) - flow_dv_jump_tbl_resource_release(dev, dev_flow); - if (dev_flow->dv.port_id_action) - flow_dv_port_id_action_resource_release(dev_flow); - if (dev_flow->dv.push_vlan_res) - flow_dv_push_vlan_action_resource_release(dev_flow); - if (dev_flow->dv.tag_resource) - flow_dv_tag_release(dev, dev_flow->dv.tag_resource); - rte_free(dev_flow); + while (!LIST_EMPTY(&flow->dev_handles)) { + dev_handle = LIST_FIRST(&flow->dev_handles); + LIST_REMOVE(dev_handle, next); + if (dev_handle->dvh.matcher) + flow_dv_matcher_release(dev, dev_handle); + if (dev_handle->dvh.encap_decap) + 
flow_dv_encap_decap_resource_release(dev_handle); + if (dev_handle->dvh.modify_hdr) + flow_dv_modify_hdr_resource_release(dev_handle); + if (dev_handle->dvh.jump) + flow_dv_jump_tbl_resource_release(dev, dev_handle); + if (dev_handle->dvh.port_id_action) + flow_dv_port_id_action_resource_release(dev_handle); + if (dev_handle->dvh.push_vlan_res) + flow_dv_push_vlan_action_resource_release(dev_handle); + if (dev_handle->dvh.tag_resource) + flow_dv_tag_release(dev, + dev_handle->dvh.tag_resource); + rte_free(dev_handle); } } @@ -8181,6 +8417,10 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, "counters are not supported"); if (flow->counter) { uint64_t pkts, bytes; + struct mlx5_flow_counter *cnt; + + cnt = flow_dv_counter_get_by_idx(dev, flow->counter, + NULL); int err = _flow_dv_query_count(dev, flow->counter, &pkts, &bytes); @@ -8190,11 +8430,11 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, NULL, "cannot read counters"); qc->hits_set = 1; qc->bytes_set = 1; - qc->hits = pkts - flow->counter->hits; - qc->bytes = bytes - flow->counter->bytes; + qc->hits = pkts - cnt->hits; + qc->bytes = bytes - cnt->bytes; if (qc->reset) { - flow->counter->hits = pkts; - flow->counter->bytes = bytes; + cnt->hits = pkts; + cnt->bytes = bytes; } return 0; } @@ -8444,9 +8684,12 @@ flow_dv_create_mtr_tbl(struct rte_eth_dev *dev, } /* Create meter count actions */ for (i = 0; i <= RTE_MTR_DROPPED; i++) { + struct mlx5_flow_counter *cnt; if (!fm->policer_stats.cnt[i]) continue; - mtb->count_actns[i] = fm->policer_stats.cnt[i]->action; + cnt = flow_dv_counter_get_by_idx(dev, + fm->policer_stats.cnt[i], NULL); + mtb->count_actns[i] = cnt->action; } /* Create drop action. */ mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop(); @@ -8665,7 +8908,7 @@ error: * @param[in] dev * Pointer to the Ethernet device structure. * @param[in] cnt - * Pointer to the flow counter. + * Index to the flow counter. * @param[in] clear * Set to clear the counter statistics. * @param[out] pkts @@ -8677,19 +8920,21 @@ error: * 0 on success, otherwise return -1. */ static int -flow_dv_counter_query(struct rte_eth_dev *dev, - struct mlx5_flow_counter *cnt, bool clear, +flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear, uint64_t *pkts, uint64_t *bytes) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter *cnt; uint64_t inn_pkts, inn_bytes; int ret; if (!priv->config.devx) return -1; - ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes); + + ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes); if (ret) return -1; + cnt = flow_dv_counter_get_by_idx(dev, counter, NULL); *pkts = inn_pkts - cnt->hits; *bytes = inn_bytes - cnt->bytes; if (clear) { @@ -8759,10 +9004,10 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) /* * Mutex-protected thunk to lock-free flow_dv_counter_alloc(). */ -static struct mlx5_flow_counter * +static uint32_t flow_dv_counter_allocate(struct rte_eth_dev *dev) { - struct mlx5_flow_counter *cnt; + uint32_t cnt; flow_dv_shared_lock(dev); cnt = flow_dv_counter_alloc(dev, 0, 0, 1); @@ -8774,7 +9019,7 @@ flow_dv_counter_allocate(struct rte_eth_dev *dev) * Mutex-protected thunk to lock-free flow_dv_counter_release(). */ static void -flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) +flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt) { flow_dv_shared_lock(dev); flow_dv_counter_release(dev, cnt);
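
Note on the counter hunks above: the thunks now take and return a bare uint32_t counter index instead of a struct mlx5_flow_counter pointer, and the counter object is resolved on demand with flow_dv_counter_get_by_idx() (index 0 meaning "no counter", as seen in the flow->counter handling). A minimal usage sketch follows, assuming a valid rte_eth_dev with DevX counters enabled; the wrapper function itself is hypothetical and only illustrates the calling convention of the static helpers shown in this patch, it is not part of the patch.

	/*
	 * Sketch only: allocate a non-shared counter index, read it once
	 * without clearing, then release it. flow_dv_counter_allocate(),
	 * flow_dv_counter_query() and flow_dv_counter_free() are the
	 * mutex-protected thunks from the hunks above.
	 */
	static void
	counter_thunks_usage_sketch(struct rte_eth_dev *dev)
	{
		uint32_t counter;
		uint64_t pkts = 0, bytes = 0;

		counter = flow_dv_counter_allocate(dev);
		if (!counter)
			return; /* Index 0 means the allocation failed. */
		/* Non-clearing read of hits/bytes; returns 0 on success. */
		if (flow_dv_counter_query(dev, counter, false, &pkts, &bytes))
			DRV_LOG(DEBUG, "cannot read counter %u", counter);
		flow_dv_counter_free(dev, counter);
	}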