From: Xiaoyu Min
Date: Tue, 5 Nov 2019 07:51:27 +0000 (+0200)
Subject: net/mlx5: improve flow item IP validation
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=fba3213015891c7ec9634e66bec5eb8eaed0d70a;p=dpdk.git

net/mlx5: improve flow item IP validation

Currently the PMD doesn't check whether the ethernet type specified by
the user conflicts with the IPv4/IPv6 item that follows it, which leads
the HW to refuse to create the rule. For example:

... pattern eth type is 0x86dd / ipv4 / end ...

Here the ethernet type is IPv6 but an IPv4 item follows; this should
fail validation and report a corresponding detailed error.

Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
Cc: stable@dpdk.org

Signed-off-by: Xiaoyu Min
Acked-by: Ori Kam
---

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index b4b08f4c6c..54f4cfe04b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1403,6 +1403,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     uint64_t last_item,
+			     uint16_t ether_type,
 			     const struct rte_flow_item_ipv4 *acc_mask,
 			     struct rte_flow_error *error)
 {
@@ -1423,7 +1425,16 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 	uint8_t next_proto = 0xFF;
+	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+				  MLX5_FLOW_LAYER_OUTER_VLAN |
+				  MLX5_FLOW_LAYER_INNER_VLAN);
 
+	if ((last_item & l2_vlan) && ether_type &&
+	    ether_type != RTE_ETHER_TYPE_IPV4)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv4 cannot follow L2/VLAN layer "
+					  "which ether type is not IPv4");
 	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
 		if (mask && spec)
 			next_proto = mask->hdr.next_proto_id &
@@ -1494,6 +1505,8 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     uint64_t last_item,
+			     uint16_t ether_type,
 			     const struct rte_flow_item_ipv6 *acc_mask,
 			     struct rte_flow_error *error)
 {
@@ -1519,7 +1532,16 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 	uint8_t next_proto = 0xFF;
+	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+				  MLX5_FLOW_LAYER_OUTER_VLAN |
+				  MLX5_FLOW_LAYER_INNER_VLAN);
 
+	if ((last_item & l2_vlan) && ether_type &&
+	    ether_type != RTE_ETHER_TYPE_IPV6)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv6 cannot follow L2/VLAN layer "
+					  "which ether type is not IPv6");
 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
 		if (mask && spec)
 			next_proto = mask->hdr.proto & spec->hdr.proto;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 7559810d01..d33d856ad2 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -600,10 +600,14 @@ int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
 				    struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 				 uint64_t item_flags,
+				 uint64_t last_item,
+				 uint16_t ether_type,
 				 const struct rte_flow_item_ipv4 *acc_mask,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 				 uint64_t item_flags,
+				 uint64_t last_item,
+				 uint16_t ether_type,
 				 const struct rte_flow_item_ipv6 *acc_mask,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 42c265f217..f507358e48 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3396,6 +3396,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint8_t next_protocol = 0xff;
+	uint16_t ether_type = 0;
 	int actions_n = 0;
 	const struct rte_flow_item *gre_item = NULL;
 	struct rte_flow_item_tcp nic_tcp_mask = {
@@ -3432,6 +3433,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 					     MLX5_FLOW_LAYER_OUTER_L2;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_eth *)
+					 items->spec)->type;
+				ether_type &=
+					((const struct rte_flow_item_eth *)
+					 items->mask)->type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
@@ -3440,12 +3452,25 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
 					     MLX5_FLOW_LAYER_OUTER_VLAN;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_vlan *)
+					 items->spec)->inner_type;
+				ether_type &=
+					((const struct rte_flow_item_vlan *)
+					 items->mask)->inner_type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -3468,7 +3493,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index fd27f6c3de..147436d7b3 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1037,6 +1037,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint8_t next_protocol = 0xff;
+	uint16_t ether_type = 0;
 
 	if (items == NULL)
 		return -1;
@@ -1057,6 +1058,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 					     MLX5_FLOW_LAYER_OUTER_L2;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_eth *)
+					 items->spec)->type;
+				ether_type &=
+					((const struct rte_flow_item_eth *)
+					 items->mask)->type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
@@ -1067,10 +1079,23 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 					      MLX5_FLOW_LAYER_INNER_VLAN) :
 					     (MLX5_FLOW_LAYER_OUTER_L2 |
 					      MLX5_FLOW_LAYER_OUTER_VLAN);
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_vlan *)
+					 items->spec)->inner_type;
+				ether_type &=
+					((const struct rte_flow_item_vlan *)
+					 items->mask)->inner_type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -1091,7 +1116,9 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
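
For illustration only, not part of the patch: a minimal sketch of how an
application would now hit this check through the public rte_flow API,
assuming the 19.11-era rte_flow_item_eth layout. The port_id value, the
function name, and the drop action are assumptions chosen for the example;
with this patch the mlx5 PMD is expected to reject the pattern below at
validation time with EINVAL instead of passing it on to the HW.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

static int
validate_conflicting_ether_type(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* The eth item matches ether type IPv6 (0x86dd)... */
	struct rte_flow_item_eth eth_spec = {
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV6),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = &eth_spec,
			.mask = &eth_mask,
		},
		/* ...but an IPv4 item follows: the conflicting case. */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* Expected to return a negative errno with err.message set to
	 * "IPv4 cannot follow L2/VLAN layer which ether type is not IPv4".
	 */
	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}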