for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
unsigned int i;
- if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
- items->type != RTE_FLOW_ITEM_TYPE_ETH)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "only L2 inner item"
- " is supported");
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "inner tunnel port id"
+ " item is not supported");
mask.port_id = flow_tcf_item_mask
(items, &rte_flow_item_port_id_mask,
&flow_tcf_mask_supported.port_id,
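
The blanket "only L2 inner item" check removed above gives way to per-item
validation: each case now decides for itself whether it may appear inside a
tunnel, and for PORT_ID the answer is no. As a sketch of what the new check
refuses (a hypothetical pattern, not taken from the patch):

#include <rte_flow.h>

/* Hypothetical pattern: PORT_ID placed after VXLAN is an inner (tunnel)
 * item, which the reworked PORT_ID case now rejects with ENOTSUP
 * ("inner tunnel port id item is not supported").
 */
static const struct rte_flow_item inner_port_id_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_PORT_ID },	/* inner item: rejected */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
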
@@ ... @@
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
/* TODO:
* Redundant check due to different supported mask.
* Same for the rest of items.
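
The ternary introduced in the ETH case above is the core mechanism of the
patch: once a VXLAN item has set MLX5_FLOW_LAYER_TUNNEL in item_flags, every
subsequent layer item records its INNER_* flag instead of the OUTER_* one. A
minimal self-contained sketch of that accumulation, with hypothetical flag
bits standing in for the mlx5 definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag bits for illustration; the real MLX5_FLOW_LAYER_*
 * values live in drivers/net/mlx5/mlx5_flow.h.
 */
#define LAYER_OUTER_L2 (UINT64_C(1) << 0)
#define LAYER_INNER_L2 (UINT64_C(1) << 1)
#define LAYER_TUNNEL   (UINT64_C(1) << 2)

int
main(void)
{
	uint64_t item_flags = 0;

	/* Outer ETH item: the tunnel bit is not set, so OUTER is chosen. */
	item_flags |= (item_flags & LAYER_TUNNEL) ?
		      LAYER_INNER_L2 : LAYER_OUTER_L2;
	/* A VXLAN item marks the tunnel boundary. */
	item_flags |= LAYER_TUNNEL;
	/* Inner ETH item: the tunnel bit is set, so INNER is chosen. */
	item_flags |= (item_flags & LAYER_TUNNEL) ?
		      LAYER_INNER_L2 : LAYER_OUTER_L2;
	/* Prints 0x7: outer L2, tunnel and inner L2 all recorded. */
	printf("item_flags = 0x%" PRIx64 "\n", item_flags);
	return 0;
}

@@ ... @@
"no support for partial mask on"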
" \"type\" field");
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "inner tunnel VLAN"
+ " is not supported");
ret = mlx5_flow_validate_item_vlan(items, item_flags,
error);
if (ret < 0)
@@ ... @@
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
mask.ipv4 = flow_tcf_item_mask
(items, &rte_flow_item_ipv4_mask,
&flow_tcf_mask_supported.ipv4,
@@ ... @@
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
mask.ipv6 = flow_tcf_item_mask
(items, &rte_flow_item_ipv6_mask,
&flow_tcf_mask_supported.ipv6,
@@ ... @@
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
mask.udp = flow_tcf_item_mask
(items, &rte_flow_item_udp_mask,
&flow_tcf_mask_supported.udp,
@@ ... @@
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp(items, item_flags,
next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
mask.tcp = flow_tcf_item_mask
(items, &rte_flow_item_tcp_mask,
&flow_tcf_mask_supported.tcp,
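
The IPv4, IPv6, UDP and TCP hunks above all repeat the same inner-or-outer
selection; only the flag pair changes. The patch keeps that repetition inline,
next to each item's mask handling, but the shared shape could be captured by
a helper along these lines (hypothetical, not part of the patch):

#include <stdint.h>

/* Hypothetical helper folding the tunnel-aware flag selection repeated in
 * each case label; tunnel/inner/outer stand for MLX5_FLOW_LAYER_* bits.
 */
static inline uint64_t
flow_layer_select(uint64_t item_flags, uint64_t tunnel,
		  uint64_t inner, uint64_t outer)
{
	return item_flags | ((item_flags & tunnel) ? inner : outer);
}

Each case would then collapse to a single call, e.g. item_flags =
flow_layer_select(item_flags, MLX5_FLOW_LAYER_TUNNEL,
MLX5_FLOW_LAYER_INNER_L4_TCP, MLX5_FLOW_LAYER_OUTER_L4_TCP);
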
@@ ... @@
return -rte_errno;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- if (!(action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP))
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)
return rte_flow_error_set
(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "vni pattern should be followed by"
- " vxlan decapsulation action");
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "vxlan tunnel over vlan"
+ " is not supported");
ret = mlx5_flow_validate_item_vxlan(items,
item_flags, error);
if (ret < 0)
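
The reworked VXLAN case no longer demands, within the item loop, that a VNI
pattern be paired with a VXLAN decapsulation action; instead it adds a
structural restriction: the tunnel may not be stacked on an outer VLAN. Two
hypothetical patterns (specs and masks omitted) contrasting what now reaches
mlx5_flow_validate_item_vxlan() and what is rejected outright:

#include <rte_flow.h>

/* Accepted shape: VXLAN over an untagged outer header stack. */
static const struct rte_flow_item vxlan_plain[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* Rejected shape: the VLAN item sets MLX5_FLOW_LAYER_OUTER_VLAN, so the
 * VXLAN case fails with "vxlan tunnel over vlan is not supported".
 */
static const struct rte_flow_item vxlan_over_vlan[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
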