.tunnel = MLX5_FLOW_LAYER_MPLS,
.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
},
+ {
+ .tunnel = MLX5_FLOW_LAYER_NVGRE,
+ .ptype = RTE_PTYPE_TUNNEL_NVGRE,
+ },
};
/**
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an L4 layer.");
+ else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an NVGRE layer.");
if (!mask)
mask = &rte_flow_item_ipv4_mask;
else if (mask->hdr.next_proto_id != 0 &&
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an L4 layer.");
+ else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an NVGRE layer.");
if (!mask)
mask = &rte_flow_item_ipv6_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
" update.");
}
+/**
+ * Validate NVGRE item.
+ *
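+ * NVGRE must follow an outer L3 item and must be the only tunnel item in
+ * the pattern, e.g. eth / ipv4 / nvgre / eth / ipv4.
+ *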
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit flags to mark detected items.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_nvgre *mask = item->mask;
+ int ret;
+
+ if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_nvgre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_nvgre_mask,
+ sizeof(struct rte_flow_item_nvgre), error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
static int
flow_null_validate(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_attr *attr __rte_unused,
#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
#define MLX5_FLOW_LAYER_GRE (1u << 14)
#define MLX5_FLOW_LAYER_MPLS (1u << 15)
+/* List of tunnel Layer bits continued below. */
/* General pattern items bits. */
#define MLX5_FLOW_ITEM_METADATA (1u << 16)
#define MLX5_FLOW_LAYER_ICMP6 (1u << 19)
#define MLX5_FLOW_LAYER_GRE_KEY (1u << 20)
+/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_IPIP (1u << 21)
#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 22)
+#define MLX5_FLOW_LAYER_NVGRE (1u << 23)
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
- MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP)
/* Inner Masks. */
uint64_t item_flags,
uint8_t target_protocol,
struct rte_flow_error *error);
-
+int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- case RTE_FLOW_ITEM_TYPE_NVGRE:
ret = mlx5_flow_validate_item_gre(items, item_flags,
next_protocol, error);
if (ret < 0)
gre_item = items;
last_item = MLX5_FLOW_LAYER_GRE;
break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ ret = mlx5_flow_validate_item_nvgre(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_NVGRE;
+ break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
ret = mlx5_flow_validate_item_gre_key
(items, item_flags, gre_item, error);
int size;
int i;
- flow_dv_translate_item_gre(matcher, key, item, inner);
+ /* For NVGRE, GRE header fields must be set with defined values. */
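+ /* Spec 0x2000 sets only the K (key present) bit; mask 0xB000 also
+  * covers the C and S bits, so checksum and sequence numbers must be
+  * absent. The protocol is Transparent Ethernet Bridging (TEB, 0x6558).
+  */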
+ const struct rte_flow_item_gre gre_spec = {
+ .c_rsvd0_ver = RTE_BE16(0x2000),
+ .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
+ };
+ const struct rte_flow_item_gre gre_mask = {
+ .c_rsvd0_ver = RTE_BE16(0xB000),
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
+ const struct rte_flow_item gre_item = {
+ .spec = &gre_spec,
+ .mask = &gre_mask,
+ .last = NULL,
+ };
+ flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
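+ /* The GRE matcher covers the shared header fields; NVGRE-specific
+  * TNI and flow ID matching follows below.
+  */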
if (!nvgre_v)
return;
if (!nvgre_m)