Currently the PMD doesn't check whether the user-specified Ethernet type
conflicts with the IPv4/IPv6 item that follows it, which leads the HW to
refuse to create the rule, for example:
... pattern eth type is 0x86dd / ipv4 / end ...
The Ethernet type is IPv6 but an IPv4 item follows; validation should
fail and report a detailed error.
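For comparison, a pattern whose Ethernet type matches the L3 item that
follows, e.g.
... pattern eth type is 0x0800 / ipv4 / end ...
(0x0800 being the IPv4 Ethernet type), still passes this check.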
Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
Cc: stable@dpdk.org
Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Ori Kam <orika@mellanox.com>
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
const struct rte_flow_item_ipv4 *acc_mask,
struct rte_flow_error *error)
{
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
uint8_t next_proto = 0xFF;
+ const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN |
+ MLX5_FLOW_LAYER_INNER_VLAN);
+ if ((last_item & l2_vlan) && ether_type &&
+ ether_type != RTE_ETHER_TYPE_IPV4)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv4 cannot follow L2/VLAN layer "
+ "which ether type is not IPv4");
if (item_flags & MLX5_FLOW_LAYER_IPIP) {
if (mask && spec)
next_proto = mask->hdr.next_proto_id &
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
const struct rte_flow_item_ipv6 *acc_mask,
struct rte_flow_error *error)
{
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
uint8_t next_proto = 0xFF;
+ const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN |
+ MLX5_FLOW_LAYER_INNER_VLAN);
+ if ((last_item & l2_vlan) && ether_type &&
+ ether_type != RTE_ETHER_TYPE_IPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv6 cannot follow L2/VLAN layer "
+ "which ether type is not IPv6");
if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
if (mask && spec)
next_proto = mask->hdr.proto & spec->hdr.proto;
struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
const struct rte_flow_item_ipv4 *acc_mask,
struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
const struct rte_flow_item_ipv6 *acc_mask,
struct rte_flow_error *error);
int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
+ uint16_t ether_type = 0;
int actions_n = 0;
const struct rte_flow_item *gre_item = NULL;
struct rte_flow_item_tcp nic_tcp_mask = {
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_eth *)
+ items->spec)->type;
+ ether_type &=
+ ((const struct rte_flow_item_eth *)
+ items->mask)->type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
MLX5_FLOW_LAYER_OUTER_VLAN;
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_vlan *)
+ items->spec)->inner_type;
+ ether_type &=
+ ((const struct rte_flow_item_vlan *)
+ items->mask)->inner_type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
+ uint16_t ether_type = 0;
if (items == NULL)
return -1;
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_eth *)
+ items->spec)->type;
+ ether_type &=
+ ((const struct rte_flow_item_eth *)
+ items->mask)->type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
MLX5_FLOW_LAYER_INNER_VLAN) :
(MLX5_FLOW_LAYER_OUTER_L2 |
MLX5_FLOW_LAYER_OUTER_VLAN);
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_vlan *)
+ items->spec)->inner_type;
+ ether_type &=
+ ((const struct rte_flow_item_vlan *)
+ items->mask)->inner_type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
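
For context, below is a minimal application-side sketch (hypothetical, not
part of this patch; port_id is assumed to refer to a started mlx5 port)
showing how the conflict now surfaces to callers of rte_flow_validate()
instead of a bare HW failure at rule creation time:

#include <stdint.h>
#include <stdio.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

/* Build "eth type is 0x86dd / ipv4 / end" and ask the PMD to validate it. */
static int
probe_conflicting_pattern(uint16_t port_id)
{
	/* Ethernet type says IPv6 ... */
	struct rte_flow_item_eth eth_spec = {
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV6),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xffff),
	};
	/* ... but an IPv4 item follows. */
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = &eth_spec,
			.mask = &eth_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error error = { 0 };
	int ret;

	ret = rte_flow_validate(port_id, &attr, pattern, actions, &error);
	if (ret)
		/* With this patch the PMD reports the conflict here. */
		printf("validation failed: %s\n",
		       error.message ? error.message : "(no message)");
	return ret;
}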