X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_verbs.c;h=fd902078f89752216500463d9cda81a83315815f;hb=5e393509121c2188b8d58ee7d184e1cad31c88d9;hp=29cd694752f09c570b7a9b6183715dc87ac48a58;hpb=c5042f93a425645135bf548f1995175e9d89fddd;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 29cd694752..fd902078f8 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -882,13 +882,48 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
 	}
 }
 
+/**
+ * Reserve space for GRE spec in spec buffer.
+ *
+ * @param[in,out] dev_flow
+ *   Pointer to dev_flow structure.
+ *
+ * @return
+ *   Pointer to reserved space in spec buffer.
+ */
+static uint8_t *
+flow_verbs_reserve_gre(struct mlx5_flow *dev_flow)
+{
+	uint8_t *buffer;
+	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel tunnel = {
+		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+#else
+	unsigned int size = sizeof(struct ibv_flow_spec_gre);
+	struct ibv_flow_spec_gre tunnel = {
+		.type = IBV_FLOW_SPEC_GRE,
+		.size = size,
+	};
+#endif
+
+	buffer = verbs->specs + verbs->size;
+	flow_verbs_spec_add(verbs, &tunnel, size);
+	return buffer;
+}
+
 /**
  * Convert the @p item into a Verbs specification. This function assumes that
- * the input is valid and that there is space to insert the requested item
- * into the flow.
+ * the input is valid and that Verbs specification will be placed in
+ * the pre-reserved space.
  *
  * @param[in, out] dev_flow
  *   Pointer to dev_flow structure.
+ * @param[in, out] gre_spec
+ *   Pointer to space reserved for GRE spec.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
@@ -896,6 +931,7 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
  */
 static void
 flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
+			      uint8_t *gre_spec,
 			      const struct rte_flow_item *item __rte_unused,
 			      uint64_t item_flags)
 {
@@ -907,6 +943,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
 		.size = size,
 	};
 #else
+	static const struct rte_flow_item_gre empty_gre = {0,};
 	const struct rte_flow_item_gre *spec = item->spec;
 	const struct rte_flow_item_gre *mask = item->mask;
 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
@@ -915,17 +952,29 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
 		.size = size,
 	};
 
-	if (!mask)
-		mask = &rte_flow_item_gre_mask;
-	if (spec) {
-		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
-		tunnel.val.protocol = spec->protocol;
-		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
-		tunnel.mask.protocol = mask->protocol;
-		/* Remove unwanted bits from values. */
-		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+	if (!spec) {
+		spec = &empty_gre;
+		mask = &empty_gre;
+	} else {
+		if (!mask)
+			mask = &rte_flow_item_gre_mask;
+	}
+	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+	tunnel.val.protocol = spec->protocol;
+	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+	tunnel.mask.protocol = mask->protocol;
+	/* Remove unwanted bits from values. */
+	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+	tunnel.val.key &= tunnel.mask.key;
+	if (tunnel.mask.protocol) {
 		tunnel.val.protocol &= tunnel.mask.protocol;
-		tunnel.val.key &= tunnel.mask.key;
+	} else {
+		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
+		if (tunnel.val.protocol) {
+			tunnel.mask.protocol = 0xFFFF;
+			tunnel.val.protocol =
+				rte_cpu_to_be_16(tunnel.val.protocol);
+		}
 	}
 #endif
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
@@ -936,7 +985,8 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
 		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
 						       IBV_FLOW_SPEC_IPV6,
 						       IPPROTO_GRE);
-	flow_verbs_spec_add(verbs, &tunnel, size);
+	MLX5_ASSERT(gre_spec);
+	memcpy(gre_spec, &tunnel, size);
 }
 
 /**
@@ -1666,6 +1716,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 	struct mlx5_flow_rss_desc *rss_desc;
+	const struct rte_flow_item *tunnel_item = NULL;
+	uint8_t *gre_spec = NULL;
 
 	MLX5_ASSERT(wks);
 	rss_desc = &wks->rss_desc;
@@ -1680,12 +1732,12 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_FLAG:
 			flow_verbs_translate_action_flag(dev_flow, actions);
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			flow_verbs_translate_action_mark(dev_flow, actions);
 			action_flags |= MLX5_FLOW_ACTION_MARK;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			break;
 		case RTE_FLOW_ACTION_TYPE_DROP:
 			flow_verbs_translate_action_drop(dev_flow, actions);
@@ -1803,10 +1855,10 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
-			flow_verbs_translate_item_gre(dev_flow, items,
-						      item_flags);
+			gre_spec = flow_verbs_reserve_gre(dev_flow);
 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
 			item_flags |= MLX5_FLOW_LAYER_GRE;
+			tunnel_item = items;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
 			flow_verbs_translate_item_mpls(dev_flow, items,
@@ -1820,6 +1872,9 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 						  NULL, "item not supported");
 		}
 	}
+	if (item_flags & MLX5_FLOW_LAYER_GRE)
+		flow_verbs_translate_item_gre(dev_flow, gre_spec,
+					      tunnel_item, item_flags);
 	dev_flow->handle->layers = item_flags;
 	/* Other members of attr will be ignored. */
 	dev_flow->verbs.attr.priority =
@@ -1930,7 +1985,6 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 			MLX5_ASSERT(priv->drop_queue.hrxq);
 			hrxq = priv->drop_queue.hrxq;
 		} else {
-			uint32_t hrxq_idx;
 			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
 			MLX5_ASSERT(rss_desc->queue_num);
@@ -1939,9 +1993,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 			rss_desc->tunnel = !!(handle->layers &
 					      MLX5_FLOW_LAYER_TUNNEL);
 			rss_desc->shared_rss = 0;
-			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
-			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-					      hrxq_idx);
+			hrxq = mlx5_hrxq_get(dev, rss_desc);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -1949,7 +2001,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 					 "cannot get hash queue");
 				goto error;
 			}
-			handle->rix_hrxq = hrxq_idx;
+			handle->rix_hrxq = hrxq->idx;
 		}
 		MLX5_ASSERT(hrxq);
 		handle->drv_flow = mlx5_glue->create_flow
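
Note on the pattern in the diff above: the GRE item is no longer translated inside the item loop. flow_verbs_reserve_gre() only reserves room in the Verbs spec buffer and the item pointer is kept in tunnel_item; flow_verbs_translate_item_gre() fills the reserved slot after the loop, once item_flags already describes the inner layers, so a GRE protocol type the application left unspecified can be derived via mlx5_translate_tunnel_etypes(). The standalone C sketch below illustrates only that reserve-then-backfill idea under simplified assumptions; it is not part of the patch, and every name in it (spec_buf, gre_slot, reserve_slot, the item list) is hypothetical rather than an mlx5 or Verbs API.

/*
 * Minimal sketch of "reserve a spec slot now, backfill it after the item
 * loop". All types and names here are invented for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct spec_buf {
	uint8_t data[256];	/* flat buffer holding all specs */
	size_t size;		/* bytes already used */
};

struct gre_slot {
	uint16_t protocol;	/* 0 = unknown when the slot is reserved */
};

/* Reserve space for a GRE spec and return a pointer into the buffer; the
 * caller fills the contents later, analogous to the pointer returned by
 * flow_verbs_reserve_gre() in the patch. */
static struct gre_slot *
reserve_slot(struct spec_buf *buf)
{
	struct gre_slot *slot = (struct gre_slot *)(buf->data + buf->size);

	memset(slot, 0, sizeof(*slot));
	buf->size += sizeof(*slot);
	return slot;
}

int
main(void)
{
	/* Pretend pattern: GRE with no explicit protocol, then inner IPv4. */
	enum item_type { ITEM_GRE, ITEM_INNER_IPV4, ITEM_END };
	enum item_type items[] = { ITEM_GRE, ITEM_INNER_IPV4, ITEM_END };
	struct spec_buf buf = { .size = 0 };
	struct gre_slot *gre = NULL;
	int have_inner_ipv4 = 0;
	int i;

	for (i = 0; items[i] != ITEM_END; i++) {
		switch (items[i]) {
		case ITEM_GRE:
			/* Inner layers are not known yet: only reserve. */
			gre = reserve_slot(&buf);
			break;
		case ITEM_INNER_IPV4:
			have_inner_ipv4 = 1;
			break;
		default:
			break;
		}
	}
	/* Backfill after the loop: the inner layers are known now, so a
	 * protocol the application left unspecified can be deduced. */
	if (gre != NULL && gre->protocol == 0 && have_inner_ipv4)
		gre->protocol = 0x0800;	/* IPv4 ethertype */
	if (gre != NULL)
		printf("GRE protocol backfilled to 0x%04x\n", gre->protocol);
	return 0;
}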