Add support for matching flows on Geneve headers.
Signed-off-by: Moti Haimovsky <motih@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
- RX interrupts.
- Statistics query including Basic, Extended and per queue.
- Rx HW timestamp.
-- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP.
+- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP, Geneve.
- Tunnel HW offloads: packet type, inner/outer RSS, IP and UDP checksum verification.
- NIC HW offloads: encapsulation (vxlan, gre, mplsoudp, mplsogre), NAT, routing, TTL
increment/decrement, count, drop, mark. For details please see :ref:`mlx5_offloads_support`.
- L3 VXLAN and VXLAN-GPE tunnels cannot be supported together with MPLSoGRE and MPLSoUDP.
+- Match on Geneve header supports the following fields only (an example
+  follows the list):
+
+   - VNI
+   - OAM
+   - protocol type
+   - options length
+     Currently, the only supported options length value is 0.
+
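For example, an application could match Geneve traffic on VNI with a pattern
such as the following (an illustrative sketch, not part of this patch; the
variable names and the VNI value 100 are arbitrary)::

   #include <rte_flow.h>

   static struct rte_flow_item_geneve geneve_spec = {
           .vni = "\x00\x00\x64", /* VNI 100, network byte order. */
   };
   static struct rte_flow_item_geneve geneve_mask = {
           .vni = "\xff\xff\xff", /* Match all 24 VNI bits. */
   };
   static struct rte_flow_item pattern[] = {
           { .type = RTE_FLOW_ITEM_TYPE_ETH },
           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
           { .type = RTE_FLOW_ITEM_TYPE_UDP },
           { .type = RTE_FLOW_ITEM_TYPE_GENEVE,
             .spec = &geneve_spec, .mask = &geneve_mask },
           { .type = RTE_FLOW_ITEM_TYPE_END },
   };

Leaving ``ver_opt_len_o_c_rsvd0`` zero keeps the options length at 0, the only
value currently supported.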
- VF: flow rules created on VF devices can only match traffic targeted at the
configured MAC addresses (see ``rte_eth_dev_mac_addr_add()``).
FLEX_PARSER_PROFILE_ENABLE=2
+- enable Geneve flow matching::
+
+ FLEX_PARSER_PROFILE_ENABLE=0
+
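Such firmware parameters are typically applied with the ``mlxconfig`` tool
from the Mellanox MFT package and take effect after a firmware reset or
reboot; a hedged sketch (substitute the actual MST device)::

   mlxconfig -d <mst device> set FLEX_PARSER_PROFILE_ENABLE=0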
Prerequisites
-------------
* Supported flow director for LACP, VRRP, BGP and so on.
* Supported firmware version query.
+* **Updated Mellanox mlx5 driver.**
+
+ Updated Mellanox mlx5 driver with new features and improvements, including:
+
+ * Added support for VLAN pop flow offload command.
+ * Added support for VLAN push flow offload command.
+ * Added support for VLAN set PCP offload command.
+ * Added support for VLAN set VID offload command.
+ * Added support for matching on packets with Geneve tunnel header.
+
* **Added Marvell NITROX symmetric crypto PMD.**
Added a symmetric crypto PMD for Marvell NITROX V security processor.
Also, make sure to start the actual text at the margin.
=========================================================
-* **Updated Mellanox mlx5 driver.**
-
- Updated Mellanox mlx5 driver with new features and improvements, including:
-
- * Added support for VLAN pop flow offload command.
- * Added support for VLAN push flow offload command.
- * Added support for VLAN set PCP offload command.
- * Added support for VLAN set VID offload command.
-
uint32_t wqe_vlan_insert:1;
uint32_t wqe_inline_mode:2;
uint32_t vport_inline_mode:3;
+ uint32_t tunnel_stateless_geneve_rx:1;
+ uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
uint32_t lro_cap:1;
uint32_t tunnel_lro_gre:1;
uint32_t tunnel_lro_vxlan:1;
MLX5_GET(per_protocol_networking_offload_caps, hcattr,
lro_timer_supported_periods[i]);
}
+ attr->tunnel_stateless_geneve_rx =
+ MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, tunnel_stateless_geneve_rx);
+ attr->geneve_max_opt_len =
+ MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, max_geneve_opt_len);
attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
hcattr, wqe_inline_mode);
if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
return 0;
}
+/**
+ * Validate Geneve item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_geneve *spec = item->spec;
+ const struct rte_flow_item_geneve *mask = item->mask;
+ int ret;
+ uint16_t gbhdr;
+ uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
+ const struct rte_flow_item_geneve nic_mask = {
+ .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
+ .vni = "\xff\xff\xff",
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
+
+ if (!(priv->config.hca_attr.flex_parser_protocols &
+ MLX5_HCA_FLEX_GENEVE_ENABLED) ||
+ !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Geneve is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ /*
+ * Verify an outer UDP header is present, as required by the
+ * Geneve specification (https://tools.ietf.org/html/rfc8926).
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_geneve_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_geneve), error);
+ if (ret)
+ return ret;
+ if (spec) {
+ gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
+ if (MLX5_GENEVE_VER_VAL(gbhdr) ||
+ MLX5_GENEVE_CRITO_VAL(gbhdr) ||
+ MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Geneve protocol unsupported"
+ " fields are being used");
+ if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported Geneve options length");
+ }
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve tunnel must be fully defined");
+ return 0;
+}
+
/**
* Validate MPLS item.
*
#define MLX5_FLOW_LAYER_IPIP (1u << 21)
#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 22)
#define MLX5_FLOW_LAYER_NVGRE (1u << 23)
+#define MLX5_FLOW_LAYER_GENEVE (1u << 24)
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
#define MLX5_FLOW_LAYER_TUNNEL \
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
- MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP)
+ MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+ MLX5_FLOW_LAYER_GENEVE)
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
#define MLX5_UDP_PORT_VXLAN 4789
#define MLX5_UDP_PORT_VXLAN_GPE 4790
+/* UDP port number for GENEVE. */
+#define MLX5_UDP_PORT_GENEVE 6081
+
/* Priority reserved for default flows. */
#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
+
+/* First 16 bits of the Geneve header. */
+#define MLX5_GENEVE_VER_MASK 0x3
+#define MLX5_GENEVE_VER_SHIFT 14
+#define MLX5_GENEVE_VER_VAL(a) \
+ (((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
+#define MLX5_GENEVE_OPTLEN_MASK 0x3F
+#define MLX5_GENEVE_OPTLEN_SHIFT 8
+#define MLX5_GENEVE_OPTLEN_VAL(a) \
+ (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
+#define MLX5_GENEVE_OAMF_MASK 0x1
+#define MLX5_GENEVE_OAMF_SHIFT 7
+#define MLX5_GENEVE_OAMF_VAL(a) \
+ (((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
+#define MLX5_GENEVE_CRITO_MASK 0x1
+#define MLX5_GENEVE_CRITO_SHIFT 6
+#define MLX5_GENEVE_CRITO_VAL(a) \
+ (((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
+#define MLX5_GENEVE_RSVD_MASK 0x3F
+#define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
+/*
+ * The length of the Geneve options fields, expressed in four-byte multiples,
+ * not including the eight-byte fixed tunnel header.
+ */
+#define MLX5_GENEVE_OPT_LEN_0 14
+#define MLX5_GENEVE_OPT_LEN_1 63
+
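As a quick sanity check of these bit positions, consider the first 16 bits of
a Geneve header carrying a single 4-byte option and no flags (an illustrative
sketch, not part of the patch; the helper name is hypothetical):

	#include <assert.h>
	#include <stdint.h>

	static void
	geneve_hdr_macros_check(void)
	{
		/* |Ver(2)|Opt Len(6)|O(1)|C(1)|Rsvd(6)| with Ver = 0,
		 * Opt Len = 1 (one 4-byte option), O = C = Rsvd = 0,
		 * i.e. 0x0100 in host byte order.
		 */
		uint16_t gbhdr = 0x0100;

		assert(MLX5_GENEVE_VER_VAL(gbhdr) == 0);
		assert(MLX5_GENEVE_OPTLEN_VAL(gbhdr) == 1);
		assert(MLX5_GENEVE_OAMF_VAL(gbhdr) == 0);
		assert(MLX5_GENEVE_CRITO_VAL(gbhdr) == 0);
		assert(MLX5_GENEVE_RSVD_VAL(gbhdr) == 0);
	}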
enum mlx5_flow_drv_type {
MLX5_FLOW_TYPE_MIN,
MLX5_FLOW_TYPE_DV,
uint64_t item_flags,
uint8_t target_protocol,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ ret = mlx5_flow_validate_item_geneve(items,
+ item_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GENEVE;
+ break;
case RTE_FLOW_ITEM_TYPE_MPLS:
ret = mlx5_flow_validate_item_mpls(dev, items,
item_flags,
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
+/**
+ * Add Geneve item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_geneve(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner)
+{
+ const struct rte_flow_item_geneve *geneve_m = item->mask;
+ const struct rte_flow_item_geneve *geneve_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ uint16_t dport;
+ uint16_t gbhdr_m;
+ uint16_t gbhdr_v;
+ char *vni_m;
+ char *vni_v;
+ size_t size, i;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ dport = MLX5_UDP_PORT_GENEVE;
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!geneve_v)
+ return;
+ if (!geneve_m)
+ geneve_m = &rte_flow_item_geneve_mask;
+ size = sizeof(geneve_m->vni);
+ vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+ vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+ memcpy(vni_m, geneve_m->vni, size);
+ for (i = 0; i < size; ++i)
+ vni_v[i] = vni_m[i] & geneve_v->vni[i];
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
+ rte_be_to_cpu_16(geneve_m->protocol));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+ rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
+ gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
+ gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
+ MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
+ MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
+ MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+ MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
+ MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+}
+
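Two details of the translation above are worth noting: when the pattern leaves
the outer UDP destination port unspecified, the matcher pins it to the
IANA-assigned Geneve port (MLX5_UDP_PORT_GENEVE, 6081); and each Geneve field
written to the flow key is ANDed with its mask bits first, so partially masked
fields stay consistent between matcher and key.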
/**
* Add MPLS item to matcher and to the value.
*
items, tunnel);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_LAYER_GENEVE;
+ break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
u8 gre_key_l[0x8];
u8 vxlan_vni[0x18];
u8 reserved_at_b8[0x8];
- u8 reserved_at_c0[0x20];
+ u8 geneve_vni[0x18];
+ u8 reserved_at_d8[0x7];
+ u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0xe0];
+ u8 reserved_at_120[0xa];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_protocol_type[0x10];
+ u8 reserved_at_140[0xc0];
};
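The new fields fit exactly into the gaps they replace: geneve_vni starts at
bit offset 0xc0 (formerly reserved_at_c0) and spans 0x18 bits, so the seven
bits of reserved_at_d8 plus the single geneve_oam bit bring the running offset
to 0xe0, exactly where reserved_at_e0 resumes. Likewise, reserved_at_120[0xa],
geneve_opt_len[0x6] and geneve_protocol_type[0x10] occupy offsets 0x120
through 0x140, and reserved_at_140[0xc0] ends at 0x200, matching the former
reserved_at_120[0xe0].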
struct mlx5_ifc_ipv4_layout_bits {