diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 4849bd9811..59ef716ebe 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2149,6 +2149,113 @@ flow_dv_modify_hdr_resource_register
 
 #define MLX5_CNT_CONTAINER_RESIZE 64
 
+/**
+ * Get or create a flow counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] shared
+ *   Indicate if this counter is shared with other flows.
+ * @param[in] id
+ *   Counter identifier.
+ *
+ * @return
+ *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
+                               uint32_t id)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+        struct mlx5_flow_counter *cnt = NULL;
+        struct mlx5_devx_obj *dcs = NULL;
+
+        if (!priv->config.devx) {
+                rte_errno = ENOTSUP;
+                return NULL;
+        }
+        if (shared) {
+                TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
+                        if (cnt->shared && cnt->id == id) {
+                                cnt->ref_cnt++;
+                                return cnt;
+                        }
+                }
+        }
+        dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+        if (!dcs)
+                return NULL;
+        cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+        if (!cnt) {
+                claim_zero(mlx5_devx_cmd_destroy(dcs));
+                rte_errno = ENOMEM;
+                return NULL;
+        }
+        struct mlx5_flow_counter tmpl = {
+                .shared = shared,
+                .ref_cnt = 1,
+                .id = id,
+                .dcs = dcs,
+        };
+        tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
+        if (!tmpl.action) {
+                claim_zero(mlx5_devx_cmd_destroy(dcs));
+                rte_errno = errno;
+                rte_free(cnt);
+                return NULL;
+        }
+        *cnt = tmpl;
+        TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
+        return cnt;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] counter
+ *   Pointer to the counter handler.
+ */
+static void
+flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
+                                 struct mlx5_flow_counter *counter)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+
+        if (!counter)
+                return;
+        if (--counter->ref_cnt == 0) {
+                TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
+                claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
+                rte_free(counter);
+        }
+}
+
+/**
+ * Query a devx flow counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] cnt
+ *   Pointer to the flow counter.
+ * @param[out] pkts
+ *   The statistics value of packets.
+ * @param[out] bytes
+ *   The statistics value of bytes.
+ *
+ * @return
+ *   0 on success, otherwise a negative errno value and rte_errno is set.
+ */
+static inline int
+_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
+                              struct mlx5_flow_counter *cnt, uint64_t *pkts,
+                              uint64_t *bytes)
+{
+        return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
+                                                0, NULL, NULL, 0);
+}
+
 /**
  * Get a pool by a counter.
  *
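The fallback trio above is reached through the ordinary rte_flow API, so
applications see no change. A minimal sketch of a flow whose shared COUNT
action would exercise flow_dv_counter_alloc_fallback's shared-lookup path on
a port where priv->counter_fallback is set (port id, pattern and the DROP
action are illustrative, not taken from this patch):

#include <rte_errno.h>
#include <rte_flow.h>

static int
shared_counter_demo(uint16_t port_id)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_count count_conf = {
                .shared = 1, /* reuse one counter across flows */
                .id = 42,    /* the id matched by the TAILQ lookup */
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_query_count query = { .reset = 0 };
        struct rte_flow_error error;
        struct rte_flow *flow;

        flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
        if (!flow)
                return -rte_errno;
        /* Lands in _flow_dv_query_count_fallback() on fallback devices. */
        if (rte_flow_query(port_id, flow, &actions[0], &query, &error))
                return -rte_errno;
        return rte_flow_destroy(port_id, flow, &error);
}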
@@ -2212,8 +2319,6 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
 {
         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
                                       (dev->data->dev_private))->sh;
-        struct mlx5dv_pd dv_pd;
-        struct mlx5dv_obj dv_obj;
         struct mlx5_devx_mkey_attr mkey_attr;
         struct mlx5_counter_stats_mem_mng *mem_mng;
         volatile struct flow_counter_stats *raw_data;
@@ -2237,13 +2342,10 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
                 rte_free(mem);
                 return NULL;
         }
-        dv_obj.pd.in = sh->pd;
-        dv_obj.pd.out = &dv_pd;
-        mlx5_glue->dv_init_obj(&dv_obj, MLX5DV_OBJ_PD);
         mkey_attr.addr = (uintptr_t)mem;
         mkey_attr.size = size;
         mkey_attr.umem_id = mem_mng->umem->umem_id;
-        mkey_attr.pd = dv_pd.pdn;
+        mkey_attr.pd = sh->pdn;
         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
         if (!mem_mng->dm) {
                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
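With dv_pd/dv_obj gone from this function, the PD number arrives pre-cached
in sh->pdn, so the per-mkey mlx5dv round trip disappears. Presumably the
lookup now happens once when the shared IB context is created; a sketch of
such a helper (the name is ours), calling the mlx5dv API directly where the
driver itself would go through mlx5_glue:

#include <infiniband/mlx5dv.h>
#include <infiniband/verbs.h>

/* Resolve a protection domain's number once; callers cache the result. */
static int
pd_to_pdn(struct ibv_pd *pd, uint32_t *pdn)
{
        struct mlx5dv_pd pd_info;
        struct mlx5dv_obj obj;
        int ret;

        obj.pd.in = pd;
        obj.pd.out = &pd_info;
        ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_PD);
        if (ret)
                return ret;
        *pdn = pd_info.pdn;
        return 0;
}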
@@ -2335,14 +2437,18 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
  *   0 on success, otherwise a negative errno value and rte_errno is set.
  */
 static inline int
-_flow_dv_query_count(struct rte_eth_dev *dev __rte_unused,
+_flow_dv_query_count(struct rte_eth_dev *dev,
                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
                      uint64_t *bytes)
 {
+        struct mlx5_priv *priv = dev->data->dev_private;
         struct mlx5_flow_counter_pool *pool =
                         flow_dv_counter_pool_get(cnt);
         int offset = cnt - &pool->counters_raw[0];
 
+        if (priv->counter_fallback)
+                return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
+
         rte_spinlock_lock(&pool->sl);
         /*
          * The single counters allocation may allocate smaller ID than the
@@ -2547,10 +2653,8 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
                                                                0);
 
-#ifndef HAVE_IBV_DEVX_ASYNC
-        rte_errno = ENOTSUP;
-        return NULL;
-#endif
+        if (priv->counter_fallback)
+                return flow_dv_counter_alloc_fallback(dev, shared, id);
         if (!priv->config.devx) {
                 rte_errno = ENOTSUP;
                 return NULL;
@@ -2636,11 +2740,17 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
  *   Pointer to the counter handler.
  */
 static void
-flow_dv_counter_release(struct rte_eth_dev *dev __rte_unused,
+flow_dv_counter_release(struct rte_eth_dev *dev,
                         struct mlx5_flow_counter *counter)
 {
+        struct mlx5_priv *priv = dev->data->dev_private;
+
         if (!counter)
                 return;
+        if (priv->counter_fallback) {
+                flow_dv_counter_release_fallback(dev, counter);
+                return;
+        }
         if (--counter->ref_cnt == 0) {
                 struct mlx5_flow_counter_pool *pool =
                                 flow_dv_counter_pool_get(counter);
@@ -2770,7 +2880,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                         (dev, items, attr, item_flags, error);
                         if (ret < 0)
                                 return ret;
-                        last_item |= MLX5_FLOW_ITEM_PORT_ID;
+                        last_item = MLX5_FLOW_ITEM_PORT_ID;
                         break;
                 case RTE_FLOW_ITEM_TYPE_ETH:
                         ret = mlx5_flow_validate_item_eth(items, item_flags,
@@ -2853,7 +2963,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
                         break;
                 case RTE_FLOW_ITEM_TYPE_GRE:
-                case RTE_FLOW_ITEM_TYPE_NVGRE:
                         ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                           next_protocol, error);
                         if (ret < 0)
@@ -2861,12 +2970,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                         gre_item = items;
                         last_item = MLX5_FLOW_LAYER_GRE;
                         break;
+                case RTE_FLOW_ITEM_TYPE_NVGRE:
+                        ret = mlx5_flow_validate_item_nvgre(items, item_flags,
+                                                            next_protocol,
+                                                            error);
+                        if (ret < 0)
+                                return ret;
+                        last_item = MLX5_FLOW_LAYER_NVGRE;
+                        break;
                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
                         ret = mlx5_flow_validate_item_gre_key
                                 (items, item_flags, gre_item, error);
                         if (ret < 0)
                                 return ret;
-                        item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
+                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
                         break;
                 case RTE_FLOW_ITEM_TYPE_VXLAN:
                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
@@ -2904,7 +3021,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                            error);
                         if (ret < 0)
                                 return ret;
-                        item_flags |= MLX5_FLOW_LAYER_ICMP;
+                        last_item = MLX5_FLOW_LAYER_ICMP;
                         break;
                 case RTE_FLOW_ITEM_TYPE_ICMP6:
                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
@@ -2912,7 +3029,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                             error);
                         if (ret < 0)
                                 return ret;
-                        item_flags |= MLX5_FLOW_LAYER_ICMP6;
+                        last_item = MLX5_FLOW_LAYER_ICMP6;
                         break;
                 default:
                         return rte_flow_error_set(error, ENOTSUP,
@@ -3806,7 +3923,21 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
         int size;
         int i;
 
-        flow_dv_translate_item_gre(matcher, key, item, inner);
+        /* For NVGRE, GRE header fields must be set with defined values. */
+        const struct rte_flow_item_gre gre_spec = {
+                .c_rsvd0_ver = RTE_BE16(0x2000),
+                .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
+        };
+        const struct rte_flow_item_gre gre_mask = {
+                .c_rsvd0_ver = RTE_BE16(0xB000),
+                .protocol = RTE_BE16(UINT16_MAX),
+        };
+        const struct rte_flow_item gre_item = {
+                .spec = &gre_spec,
+                .mask = &gre_mask,
+                .last = NULL,
+        };
+        flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
         if (!nvgre_v)
                 return;
         if (!nvgre_m)
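To unpack the constants in the synthesized GRE item above: NVGRE (RFC 7637)
is GRE carrying a mandatory key (24-bit VSID plus 8-bit FlowID) and neither
checksum nor sequence number, with protocol 0x6558 (RTE_ETHER_TYPE_TEB,
Transparent Ethernet Bridging). Hence spec 0x2000 (only the K bit set) under
mask 0xB000 (test the C, K and S bits). A self-contained check of that
reading (bit names per RFC 2784/2890; not driver code):

#include <assert.h>
#include <stdint.h>

#define GRE_C_BIT 0x8000u /* checksum present */
#define GRE_K_BIT 0x2000u /* key present */
#define GRE_S_BIT 0x1000u /* sequence number present */

int
main(void)
{
        const uint16_t spec = 0x2000, mask = 0xB000;

        assert((mask & GRE_K_BIT) && (spec & GRE_K_BIT));  /* key required */
        assert((mask & GRE_C_BIT) && !(spec & GRE_C_BIT)); /* no checksum */
        assert((mask & GRE_S_BIT) && !(spec & GRE_S_BIT)); /* no sequence */
        return 0;
}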
@@ -4930,7 +5061,7 @@ cnt_err:
                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
                         flow_dv_translate_item_gre_key(match_mask,
                                                        match_value, items);
-                        item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
+                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
                         break;
                 case RTE_FLOW_ITEM_TYPE_NVGRE:
                         flow_dv_translate_item_nvgre(match_mask, match_value,
@@ -4960,12 +5091,12 @@ cnt_err:
                 case RTE_FLOW_ITEM_TYPE_ICMP:
                         flow_dv_translate_item_icmp(match_mask, match_value,
                                                     items, tunnel);
-                        item_flags |= MLX5_FLOW_LAYER_ICMP;
+                        last_item = MLX5_FLOW_LAYER_ICMP;
                         break;
                 case RTE_FLOW_ITEM_TYPE_ICMP6:
                         flow_dv_translate_item_icmp6(match_mask, match_value,
                                                      items, tunnel);
-                        item_flags |= MLX5_FLOW_LAYER_ICMP6;
+                        last_item = MLX5_FLOW_LAYER_ICMP6;
                         break;
                 default:
                         break;
@@ -5051,13 +5182,14 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                  dv->hash_fields,
                                                  (*flow->queue),
                                                  flow->rss.queue_num);
-                        if (!hrxq)
+                        if (!hrxq) {
                                 hrxq = mlx5_hrxq_new
                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
                                          dv->hash_fields, (*flow->queue),
                                          flow->rss.queue_num,
                                          !!(dev_flow->layers &
                                             MLX5_FLOW_LAYER_TUNNEL));
+                        }
                         if (!hrxq) {
                                 rte_flow_error_set
                                         (error, rte_errno,
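The recurring changes from "item_flags |= <layer>" to "last_item = <layer>"
in the two translation hunks above, like the validation hunks earlier, rest
on the same loop contract: both item loops fold last_item into item_flags
once per iteration, after the switch, so a handler checking item_flags
(e.g. for MLX5_FLOW_LAYER_TUNNEL) must never see the layer of the item it
is currently processing. In outline (a sketch of the contract, not the
driver's actual loop):

#include <stdint.h>

#define LAYER_GRE    (1u << 0) /* illustrative bits, not the real */
#define LAYER_ICMP   (1u << 1) /* MLX5_FLOW_LAYER_* values */
#define LAYER_TUNNEL LAYER_GRE

uint32_t
walk_items(const uint32_t *item_layers, unsigned int n)
{
        uint32_t item_flags = 0;
        uint32_t last_item = 0;
        unsigned int i;

        for (i = 0; i < n; ++i) {
                /* Handlers may consult previous layers only, e.g.: */
                int tunnel = !!(item_flags & LAYER_TUNNEL);

                (void)tunnel;               /* used by real handlers */
                last_item = item_layers[i]; /* never item_flags |= ... here */
                item_flags |= last_item;    /* folded after the switch */
        }
        return item_flags;
}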