* Item specification.
* @param[in] item_flags
* Bit-fields that hold the items detected until now.
+ * @param[in] acc_mask
+ * Acceptable mask; if NULL, the default internal mask
+ * will be used to check whether item fields are supported.
* @param[out] error
* Pointer to error structure.
*
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
+ const struct rte_flow_item_ipv4 *acc_mask,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv4 *mask = item->mask;
"partial mask is not supported"
" for protocol");
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
+ acc_mask ? (const uint8_t *)acc_mask
+ : (const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_ipv4),
error);
if (ret < 0)
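Illustration (not part of the patch): a caller can now narrow validation by passing its own acceptable mask, or pass NULL to keep the built-in nic_mask behavior. The mask and wrapper below are hypothetical; only next_proto_id and the addresses are permitted.

#include <rte_flow.h>
#include "mlx5_flow.h"

/* Hypothetical acceptable mask: protocol and addresses only. */
static const struct rte_flow_item_ipv4 narrow_ipv4_acc_mask = {
	.hdr = {
		.next_proto_id = 0xff,
		.src_addr = RTE_BE32(0xffffffff),
		.dst_addr = RTE_BE32(0xffffffff),
	},
};

static int
validate_ipv4_narrow(const struct rte_flow_item *item, uint64_t item_flags,
		     struct rte_flow_error *error)
{
	/* Fields outside narrow_ipv4_acc_mask are rejected as unsupported. */
	return mlx5_flow_validate_item_ipv4(item, item_flags,
					    &narrow_ipv4_acc_mask, error);
}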
* Item specification.
* @param[in] item_flags
* Bit-fields that hold the items detected until now.
+ * @param[in] acc_mask
+ * Acceptable mask; if NULL, the default internal mask
+ * will be used to check whether item fields are supported.
* @param[out] error
* Pointer to error structure.
*
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
+ const struct rte_flow_item_ipv6 *acc_mask,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv6 *mask = item->mask;
if (!mask)
mask = &rte_flow_item_ipv6_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
+ acc_mask ? (const uint8_t *)acc_mask
+ : (const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_ipv6),
error);
if (ret < 0)
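Both validators funnel into mlx5_flow_item_acceptable(). Its core check, sketched below with error reporting and the item->last range handling elided, is a byte-wise subset test: the user mask may only enable bits that the acceptable mask also enables.

#include <stddef.h>
#include <stdint.h>

/* Sketch of the subset test: returns 1 when every bit set in mask
 * is also set in acc, 0 otherwise. */
static int
mask_is_subset(const uint8_t *mask, const uint8_t *acc, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		if ((acc[i] | mask[i]) != acc[i])
			return 0; /* mask enables an unsupported bit */
	return 1;
}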
struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
+ const struct rte_flow_item_ipv4 *acc_mask,
struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
+ const struct rte_flow_item_ipv6 *acc_mask,
struct rte_flow_error *error);
int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
const struct rte_flow_item *item,
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
},
.ipv4.hdr = {
.next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ .type_of_service = 0xff,
.src_addr = RTE_BE32(0xffffffff),
.dst_addr = RTE_BE32(0xffffffff),
},
.ipv6.hdr = {
.proto = 0xff,
+ .vtc_flow = RTE_BE32(0xfful << IPV6_HDR_TC_SHIFT),
+ .hop_limits = 0xff,
.src_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
break;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv4
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv4, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv6
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv6, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
vlan_etype = spec.vlan->inner_type;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv4
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv4, error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
}
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv6
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv6, error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
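Illustration (not part of the patch): an end-to-end check through the public API, with arbitrary values. A rule matching the IPv6 hop limit, which the extended masks allow, can be verified with rte_flow_validate() before creation.

#include <rte_flow.h>

static int
validate_hop_limit_rule(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_ipv6 ipv6_spec = {
		.hdr = { .hop_limits = 64 },
	};
	static const struct rte_flow_item_ipv6 ipv6_mask = {
		.hdr = { .hop_limits = 0xff },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
		  .spec = &ipv6_spec, .mask = &ipv6_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}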