X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=c5c767aaee1a40e0890882db72849a68061109d7;hb=b25987fa7e25b3ba852f9433d22761c006884205;hp=3194cd5633e07cb7d23f4323999c48ac07593ce1;hpb=0a4291173989faa02a9a4250246ab6c9da4ff741;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3194cd5633..c5c767aaee 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -50,6 +50,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			     const struct rte_flow_attr *attr,
 			     const struct rte_flow_action *app_actions,
 			     uint32_t flow_idx,
+			     const struct mlx5_flow_tunnel *tunnel,
 			     struct tunnel_default_miss_ctx *ctx,
 			     struct rte_flow_error *error);
 static struct mlx5_flow_tunnel *
@@ -99,6 +100,8 @@ struct mlx5_flow_expand_node {
 	 * RSS types bit-field associated with this node
 	 * (see ETH_RSS_* definitions).
 	 */
+	uint8_t optional;
+	/**< optional expand field. Default 0 to expand, 1 not go deeper. */
 };
 
 /** Object returned by mlx5_flow_expand_rss(). */
@@ -111,6 +114,30 @@ struct mlx5_flow_expand_rss {
 	} entry[];
 };
 
+static void
+mlx5_dbg__print_pattern(const struct rte_flow_item *item);
+
+static bool
+mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
+{
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+	case RTE_FLOW_ITEM_TYPE_UDP:
+	case RTE_FLOW_ITEM_TYPE_TCP:
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+	case RTE_FLOW_ITEM_TYPE_NVGRE:
+	case RTE_FLOW_ITEM_TYPE_GRE:
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
 static enum rte_flow_item_type
 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 {
@@ -212,7 +239,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 	return ret;
 }
 
-#define MLX5_RSS_EXP_ELT_N 8
+#define MLX5_RSS_EXP_ELT_N 16
 
 /**
  * Expand RSS flows into several possible flows according to the RSS hash
@@ -270,8 +297,11 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
 		addr = buf->entry[0].pattern;
 	}
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
-			last_item = item;
+		if (!mlx5_flow_is_rss_expandable_item(item)) {
+			user_pattern_size += sizeof(*item);
+			continue;
+		}
+		last_item = item;
 		for (i = 0; node->next && node->next[i]; ++i) {
 			next = &graph[node->next[i]];
 			if (next->type == item->type)
@@ -366,7 +396,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
 		}
 	}
 	/* Go deeper. */
-	if (node->next) {
+	if (!node->optional && node->next) {
 		next_node = node->next;
 		if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
 			rte_errno = E2BIG;
@@ -405,6 +435,8 @@ enum mlx5_expansion {
 	MLX5_EXPANSION_VXLAN,
 	MLX5_EXPANSION_VXLAN_GPE,
 	MLX5_EXPANSION_GRE,
+	MLX5_EXPANSION_NVGRE,
+	MLX5_EXPANSION_GRE_KEY,
 	MLX5_EXPANSION_MPLS,
 	MLX5_EXPANSION_ETH,
 	MLX5_EXPANSION_ETH_VLAN,
@@ -462,6 +494,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			(MLX5_EXPANSION_OUTER_IPV4_UDP,
 			 MLX5_EXPANSION_OUTER_IPV4_TCP,
 			 MLX5_EXPANSION_GRE,
+			 MLX5_EXPANSION_NVGRE,
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
@@ -484,7 +517,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_OUTER_IPV6_TCP,
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6,
-			 MLX5_EXPANSION_GRE),
+			 MLX5_EXPANSION_GRE,
+			 MLX5_EXPANSION_NVGRE),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
 		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
 			ETH_RSS_NONFRAG_IPV6_OTHER,
@@ -513,9 +547,20 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 	},
 	[MLX5_EXPANSION_GRE] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-						  MLX5_EXPANSION_IPV6),
+						  MLX5_EXPANSION_IPV6,
+						  MLX5_EXPANSION_GRE_KEY),
 		.type = RTE_FLOW_ITEM_TYPE_GRE,
 	},
+	[MLX5_EXPANSION_GRE_KEY] = {
+		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+						  MLX5_EXPANSION_IPV6),
+		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
+		.optional = 1,
+	},
+	[MLX5_EXPANSION_NVGRE] = {
+		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
+	},
 	[MLX5_EXPANSION_MPLS] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
 						  MLX5_EXPANSION_IPV6),
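The hunks above introduce the optional expansion node and the check that honors it: when mlx5_flow_expand_rss() reaches its "go deeper" step, a node marked optional (only GRE_KEY here) may still end an expanded pattern, but its successors are no longer walked automatically. The standalone sketch below isolates just that rule; struct exp_node and the walk are illustrative stand-ins for the driver's expansion graph, and the real code additionally filters candidate paths by their RSS type bits.

#include <stdio.h>
#include <stdbool.h>

/*
 * Sketch of the "optional" rule only; these types are illustrative,
 * not the driver's, and real expansion also matches RSS types.
 */
struct exp_node {
	const char *name;
	bool optional;
	const struct exp_node *next[4];
};

static void
expand(const struct exp_node *node, const char *path)
{
	char buf[256];
	int i;

	snprintf(buf, sizeof(buf), "%s/%s", path, node->name);
	printf("%s\n", buf);
	if (node->optional)
		return; /* emit the node, but do not go deeper */
	for (i = 0; node->next[i] != NULL; i++)
		expand(node->next[i], buf);
}

int
main(void)
{
	static const struct exp_node ipv4 = { "ipv4", false, { NULL } };
	static const struct exp_node gre_key = {
		"gre_key", true, { &ipv4, NULL }
	};
	static const struct exp_node gre = {
		"gre", false, { &ipv4, &gre_key, NULL }
	};

	expand(&gre, "eth");
	return 0;
}

Compiled as-is this prints eth/gre, eth/gre/ipv4 and eth/gre/gre_key, and never recurses past the optional gre_key node.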
@@ -2079,7 +2124,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "IPv4 cannot follow L2/VLAN layer "
 					  "which ether type is not IPv4");
-	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
+	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
 		if (mask && spec)
 			next_proto = mask->hdr.next_proto_id &
 				     spec->hdr.next_proto_id;
@@ -2187,7 +2232,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 					  "which ether type is not IPv6");
 	if (mask && mask->hdr.proto == UINT8_MAX && spec)
 		next_proto = spec->hdr.proto;
-	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
+	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
 			return rte_flow_error_set(error, EINVAL,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -4549,7 +4594,9 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
 					  "Failed to find Meter Policy.");
 		goto exit;
 	}
-	if (policy->is_rss) {
+	if (policy->is_rss ||
+	    (policy->is_queue &&
+	     !policy->sub_policys[MLX5_MTR_DOMAIN_INGRESS][0]->rix_hrxq[0])) {
 		struct mlx5_flow_workspace *wks =
 				mlx5_flow_get_thread_workspace();
 		struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
@@ -4565,34 +4612,49 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
 		for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
 			struct mlx5_flow dev_flow = {0};
 			struct mlx5_flow_handle dev_handle = { {0} };
-			const void *rss_act = policy->act_cnt[i].rss->conf;
-			struct rte_flow_action rss_actions[2] = {
-				[0] = {
+
+			rss_desc_v[i] = wks->rss_desc;
+			if (policy->is_rss) {
+				const void *rss_act =
+					policy->act_cnt[i].rss->conf;
+				struct rte_flow_action rss_actions[2] = {
+					[0] = {
 					.type = RTE_FLOW_ACTION_TYPE_RSS,
 					.conf = rss_act
-				},
-				[1] = {
+					},
+					[1] = {
 					.type = RTE_FLOW_ACTION_TYPE_END,
 					.conf = NULL
-				}
-			};
+					}
+				};
 
-			dev_flow.handle = &dev_handle;
-			dev_flow.ingress = attr->ingress;
-			dev_flow.flow = flow;
-			dev_flow.external = 0;
+				dev_flow.handle = &dev_handle;
+				dev_flow.ingress = attr->ingress;
+				dev_flow.flow = flow;
+				dev_flow.external = 0;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-			dev_flow.dv.transfer = attr->transfer;
+				dev_flow.dv.transfer = attr->transfer;
 #endif
-			/* Translate RSS action to get rss hash fields. */
-			if (flow_drv_translate(dev, &dev_flow, attr,
+				/**
+				 * Translate RSS action to get rss hash fields.
+				 */
+				if (flow_drv_translate(dev, &dev_flow, attr,
 					       items, rss_actions, error))
-				goto exit;
-			rss_desc_v[i] = wks->rss_desc;
-			rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
-			rss_desc_v[i].hash_fields = dev_flow.hash_fields;
-			rss_desc_v[i].queue_num = rss_desc_v[i].hash_fields ?
-						  rss_desc_v[i].queue_num : 1;
+					goto exit;
+				rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
+				rss_desc_v[i].hash_fields =
+						dev_flow.hash_fields;
+				rss_desc_v[i].queue_num =
+					rss_desc_v[i].hash_fields ?
+					rss_desc_v[i].queue_num : 1;
+			} else {
+				/* This is queue action. */
+				rss_desc_v[i].key_len = 0;
+				rss_desc_v[i].hash_fields = 0;
+				rss_desc_v[i].queue =
+					&policy->act_cnt[i].queue;
+				rss_desc_v[i].queue_num = 1;
+			}
 			rss_desc[i] = &rss_desc_v[i];
 		}
 		sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
@@ -4784,8 +4846,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "Failed to allocate meter flow id.");
 	flow_id = tag_id - 1;
-	flow_id_bits = MLX5_REG_BITS - __builtin_clz(flow_id);
-	flow_id_bits = flow_id_bits ? flow_id_bits : 1;
+	flow_id_bits = (!flow_id) ? 1 :
+			(MLX5_REG_BITS - __builtin_clz(flow_id));
 	if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) >
 	    mtr_reg_bits) {
 		mlx5_ipool_free(fm->flow_ipool, tag_id);
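One detail worth spelling out in the flow_meter_split_prep() hunk: the old sequence called __builtin_clz() before checking for zero, and __builtin_clz(0) is undefined behavior, so the rewritten expression tests flow_id first. A small self-contained check of the arithmetic, assuming MLX5_REG_BITS is 32 as in the driver:

#include <stdio.h>
#include <stdint.h>

#define MLX5_REG_BITS 32

/* Bits needed to encode flow_id; zero is special-cased because
 * __builtin_clz(0) is undefined. */
static int
flow_id_width(uint32_t flow_id)
{
	return !flow_id ? 1 : MLX5_REG_BITS - __builtin_clz(flow_id);
}

int
main(void)
{
	/* 0 -> 1, 1 -> 1, 5 (0b101) -> 3, 255 -> 8 */
	printf("%d %d %d %d\n", flow_id_width(0), flow_id_width(1),
	       flow_id_width(5), flow_id_width(255));
	return 0;
}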
@@ -5125,6 +5187,7 @@ flow_check_match_action(const struct rte_flow_action actions[],
 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+		case RTE_FLOW_ACTION_TYPE_METER:
 			if (fdb_mirror)
 				*modify_after_mirror = 1;
 			break;
@@ -5958,22 +6021,14 @@ flow_create_split_outer(struct rte_eth_dev *dev,
 	return ret;
 }
 
-static struct mlx5_flow_tunnel *
-flow_tunnel_from_rule(struct rte_eth_dev *dev,
-		      const struct rte_flow_attr *attr,
-		      const struct rte_flow_item items[],
-		      const struct rte_flow_action actions[])
+static inline struct mlx5_flow_tunnel *
+flow_tunnel_from_rule(const struct mlx5_flow *flow)
 {
 	struct mlx5_flow_tunnel *tunnel;
 
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wcast-qual"
-	if (is_flow_tunnel_match_rule(dev, attr, items, actions))
-		tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
-	else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
-		tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
-	else
-		tunnel = NULL;
+	tunnel = (typeof(tunnel))flow->tunnel;
 #pragma GCC diagnostic pop
 
 	return tunnel;
@@ -6146,6 +6201,10 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 					   mlx5_support_expansion, graph_root);
 		MLX5_ASSERT(ret > 0 &&
 			    (unsigned int)ret < sizeof(expand_buffer.buffer));
+		if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
+			for (i = 0; i < buf->entries; ++i)
+				mlx5_dbg__print_pattern(buf->entry[i].pattern);
+		}
 	} else {
 		buf->entries = 1;
 		buf->entry[0].pattern = (void *)(uintptr_t)items;
@@ -6168,12 +6227,11 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 					      error);
 		if (ret < 0)
 			goto error;
-		if (is_flow_tunnel_steer_rule(dev, attr,
-					      buf->entry[i].pattern,
-					      p_actions_rx)) {
+		if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
 			ret = flow_tunnel_add_default_miss(dev, flow, attr,
 							   p_actions_rx,
 							   idx,
+							   wks->flows[0].tunnel,
 							   &default_miss_ctx,
 							   error);
 			if (ret < 0) {
@@ -6237,7 +6295,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 	}
 	flow_rxq_flags_set(dev, flow);
 	rte_free(translated_actions);
-	tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
+	tunnel = flow_tunnel_from_rule(wks->flows);
 	if (tunnel) {
 		flow->tunnel = 1;
 		flow->tunnel_id = tunnel->tunnel_id;
@@ -7213,6 +7271,24 @@ mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
 	fops->destroy_mtr_drop_tbls(dev);
 }
 
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to meter policy table.
+ */
+void
+mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+		struct mlx5_flow_meter_policy *mtr_policy)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+	fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
+}
+
 /**
  * Allocate the needed aso flow meter id.
  *
@@ -7378,14 +7454,11 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 		mlx5_free(mem);
 		return -rte_errno;
 	}
+	memset(&mkey_attr, 0, sizeof(mkey_attr));
 	mkey_attr.addr = (uintptr_t)mem;
 	mkey_attr.size = size;
 	mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
 	mkey_attr.pd = sh->pdn;
-	mkey_attr.log_entity_size = 0;
-	mkey_attr.pg_access = 0;
-	mkey_attr.klm_array = NULL;
-	mkey_attr.klm_num = 0;
 	mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
 	mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
 	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
@@ -8152,6 +8225,28 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 	return ret;
 }
 
+const struct mlx5_flow_tunnel *
+mlx5_get_tof(const struct rte_flow_item *item,
+	     const struct rte_flow_action *action,
+	     enum mlx5_tof_rule_type *rule_type)
+{
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == (typeof(item->type))
+				  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+			*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
+			return flow_items_to_tunnel(item);
+		}
+	}
+	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		if (action->type == (typeof(action->type))
+				    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+			*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
+			return flow_actions_to_tunnel(action);
+		}
+	}
+	return NULL;
+}
+
 /**
  * tunnel offload functionalilty is defined for DV environment only
  */
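The new mlx5_get_tof() helper relies on the rte_flow convention that both item and action arrays terminate with an ..._TYPE_END entry, so one linear pass over each array is enough to classify a rule as a tunnel match rule (private tunnel item present) or a tunnel set rule (private tunnel-set action present). Below is a reduced sketch of that scan shape; the enum and struct are hypothetical stand-ins, not DPDK types:

#include <stdio.h>

/* Illustrative only: stands in for the END-terminated rte_flow
 * arrays scanned by mlx5_get_tof(). */
enum tof_kind {
	KIND_END = 0,
	KIND_ORDINARY,
	KIND_TUNNEL_PRIVATE,
};

struct tof_entry {
	enum tof_kind type;
};

static const struct tof_entry *
find_tunnel_entry(const struct tof_entry *e)
{
	for (; e->type != KIND_END; e++)
		if (e->type == KIND_TUNNEL_PRIVATE)
			return e;
	return NULL; /* rule carries no tunnel offload marker */
}

int
main(void)
{
	const struct tof_entry pattern[] = {
		{ KIND_ORDINARY }, { KIND_TUNNEL_PRIVATE }, { KIND_END },
	};

	printf("marker at index %td\n",
	       find_tunnel_entry(pattern) - pattern);
	return 0;
}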
@@ -8182,13 +8277,13 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			     const struct rte_flow_attr *attr,
 			     const struct rte_flow_action *app_actions,
 			     uint32_t flow_idx,
+			     const struct mlx5_flow_tunnel *tunnel,
 			     struct tunnel_default_miss_ctx *ctx,
 			     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow *dev_flow;
 	struct rte_flow_attr miss_attr = *attr;
-	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
 	const struct rte_flow_item miss_items[2] = {
 		{
 			.type = RTE_FLOW_ITEM_TYPE_ETH,
@@ -8274,6 +8369,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 	dev_flow->flow = flow;
 	dev_flow->external = true;
 	dev_flow->tunnel = tunnel;
+	dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
 	/* Subflow object was created, we must include one in the list. */
 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
 		      dev_flow->handle, next);
@@ -8887,6 +8983,7 @@ flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
 			     __rte_unused const struct rte_flow_attr *attr,
 			     __rte_unused const struct rte_flow_action *actions,
 			     __rte_unused uint32_t flow_idx,
+			     __rte_unused const struct mlx5_flow_tunnel *tunnel,
 			     __rte_unused struct tunnel_default_miss_ctx *ctx,
 			     __rte_unused struct rte_flow_error *error)
 {
@@ -8924,3 +9021,22 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 {
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+
+static void
+mlx5_dbg__print_pattern(const struct rte_flow_item *item)
+{
+	int ret;
+	struct rte_flow_error error;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		char *item_name;
+		ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
+				    sizeof(item_name),
+				    (void *)(uintptr_t)item->type, &error);
+		if (ret > 0)
+			printf("%s ", item_name);
+		else
+			printf("%d\n", (int)item->type);
+	}
+	printf("END\n");
+}
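In mlx5_dbg__print_pattern() above, RTE_FLOW_CONV_OP_ITEM_NAME_PTR asks rte_flow_conv() to store a pointer to a statically allocated name string into the destination, which is why the call passes &item_name together with sizeof(item_name), i.e. the size of the pointer rather than of a character buffer. A minimal sketch of the same call for one item type, assuming the DPDK headers are available and with error handling trimmed:

#include <stdio.h>
#include <stdint.h>

#include <rte_flow.h>

/* Print the symbolic name of one rte_flow item type, using the same
 * conversion operation as mlx5_dbg__print_pattern() above. */
static void
print_item_name(enum rte_flow_item_type type)
{
	struct rte_flow_error error;
	char *name;

	if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &name,
			  sizeof(name), (void *)(uintptr_t)type, &error) > 0)
		printf("%s\n", name); /* e.g. "gre_key" */
	else
		printf("%d\n", (int)type);
}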