Enabled IP-in-IP tunnel type support on the DV/DR flow engine.
This includes the following combinations:
- IPv4 over IPv4
- IPv4 over IPv6
- IPv6 over IPv4
- IPv6 over IPv6
The MLX5 NIC supports IP-in-IP tunnels via the FLEX parser, so the
firmware must be configured to use FLEX parser profile 0:
mlxconfig -d <mst device> -y set FLEX_PARSER_PROFILE_ENABLE=0
Example testpmd commands:
- Match on IPv4 over IPv4 packets and do inner RSS:
testpmd> flow create 0 ingress pattern eth / ipv4 proto is 0x04 /
ipv4 / udp / end actions rss level 2 queues 0 1 2 3 end / end
- Match on IPv6 over IPv4 packets and do inner RSS:
testpmd> flow create 0 ingress pattern eth / ipv4 proto is 0x29 /
ipv6 / udp / end actions rss level 2 queues 0 1 2 3 end / end
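The remaining two combinations would follow the same pattern
(0x04 is IPPROTO_IPIP, 0x29 is IPPROTO_IPV6), for example:
- Match on IPv4 over IPv6 packets and do inner RSS:
testpmd> flow create 0 ingress pattern eth / ipv6 proto is 0x04 /
ipv4 / udp / end actions rss level 2 queues 0 1 2 3 end / end
- Match on IPv6 over IPv6 packets and do inner RSS:
testpmd> flow create 0 ingress pattern eth / ipv6 proto is 0x29 /
ipv6 / udp / end actions rss level 2 queues 0 1 2 3 end / end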
Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
- RX interrupts.
- Statistics query including Basic, Extended and per queue.
- Rx HW timestamp.
-- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP.
+- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP.
- Tunnel HW offloads: packet type, inner/outer RSS, IP and UDP checksum verification.
- NIC HW offloads: encapsulation (vxlan, gre, mplsoudp, mplsogre), NAT, routing, TTL
increment/decrement, count, drop, mark. For details please see :ref:`Supported hardware offloads using rte_flow API`.
mlxconfig -d <mst device> query | grep FLEX_PARSER_PROFILE_ENABLE
FLEX_PARSER_PROFILE_ENABLE 2
+- IP-in-IP tunnel enable
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> set FLEX_PARSER_PROFILE_ENABLE=0
+
+ Verify the configuration is set:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> query | grep FLEX_PARSER_PROFILE_ENABLE
+ FLEX_PARSER_PROFILE_ENABLE 0
+
Prerequisites
-------------
sequence number and acknowledgment number modification.
* Added support for match on ICMP/ICMP6 code and type.
* Added support for matching on GRE's key and C,K,S present bits.
+ * Added support for IP-in-IP tunnel.
* **Updated Solarflare network PMD.**
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv4 *mask = item->mask;
+ const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 nic_mask = {
.hdr = {
.src_addr = RTE_BE32(0xffffffff),
const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
+ uint8_t next_proto = 0xFF;
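+
+ /* Only a single level of IP tunneling is supported. */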
+ if (item_flags & MLX5_FLOW_LAYER_IPIP) {
+ if (mask && spec)
+ next_proto = mask->hdr.next_proto_id &
+ spec->hdr.next_proto_id;
+ if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple tunnel "
+ "not supported");
+ }
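+ /* Outer header matched an encapsulated IPv6 packet; an IPv4 item contradicts it. */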
+ if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "wrong tunnel type - IPv6 specified "
+ "but IPv4 item provided");
if (item_flags & l3m)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv6 *mask = item->mask;
+ const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 nic_mask = {
.hdr = {
.src_addr =
const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
+ uint8_t next_proto = 0xFF;
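+
+ /* Only a single level of IP tunneling is supported. */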
+ if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
+ if (mask && spec)
+ next_proto = mask->hdr.proto & spec->hdr.proto;
+ if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple tunnel "
+ "not supported");
+ }
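+ /* Outer header matched an encapsulated IPv4 packet; an IPv6 item contradicts it. */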
+ if (item_flags & MLX5_FLOW_LAYER_IPIP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "wrong tunnel type - IPv4 specified "
+ "but IPv6 item provided");
if (item_flags & l3m)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
#define MLX5_FLOW_LAYER_ICMP6 (1u << 19)
#define MLX5_FLOW_LAYER_GRE_KEY (1u << 20)
+#define MLX5_FLOW_LAYER_IPIP (1u << 21)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 22)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
- MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS | \
+ MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP)
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
{0, 0, 0},
};
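+
+/**
+ * Check the IP item for an IP-in-IP tunnel and update the layer flags.
+ *
+ * Computes the effective next-protocol value (spec AND mask) of an
+ * IPv4/IPv6 item and sets MLX5_FLOW_LAYER_IPIP or
+ * MLX5_FLOW_LAYER_IPV6_ENCAP in the flags when the item selects an
+ * encapsulated IPv4 or IPv6 packet, respectively.
+ *
+ * @param[in] item
+ *   Pointer to the flow item to check.
+ * @param[in, out] flags
+ *   Layer flags to update.
+ */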
+static void
+mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
+{
+ uint8_t next_protocol = 0xFF;
+
+ if (item->mask != NULL && item->spec != NULL) {
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (item->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (item->mask))->hdr.next_proto_id;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ (item->spec))->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ (item->mask))->hdr.proto;
+ break;
+ default:
+ break;
+ }
+ }
+ if (next_protocol == IPPROTO_IPIP)
+ *flags |= MLX5_FLOW_LAYER_IPIP;
+ if (next_protocol == IPPROTO_IPV6)
+ *flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+}
+
/**
* Acquire the synchronizing object to protect multithreaded access
* to shared dv context. Lock occurs only if context is actually
/* Reset for inner layer. */
next_protocol = 0xff;
}
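+ /* Flag IP-in-IP tunnel layers from the item's matched next protocol. */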
+ mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
/* Reset for inner layer. */
next_protocol = 0xff;
}
+ mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
MLX5_IPV4_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
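+ /* OR the IP tunnel layer flag into last_item when matched. */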
+ mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
flow_dv_translate_item_ipv6(match_mask, match_value,
MLX5_IPV6_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,