net/mlx5: support GTP
author Dekel Peled <dekelp@mellanox.com>
Thu, 16 Jan 2020 18:36:23 +0000 (20:36 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Mon, 20 Jan 2020 17:02:17 +0000 (18:02 +0100)
This patch adds support to the MLX5 PMD for matching on the GTP item,
on the msg_type and teid fields, according to RFC [1].
GTP item validation and translation functions are added and called.
The GTP tunnel type is added to the supported tunnel types.

[1] http://mails.dpdk.org/archives/dev/2019-December/152799.html

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Ori Kam <orika@mellanox.com>
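As a usage sketch (not part of this patch), an application could exercise the
new capability through the generic rte_flow API roughly as follows. The helper
name, port id, Rx queue index and TEID value are placeholders; the default
rte_flow_item_gtp_mask from rte_flow.h matches on the TEID only.

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch: steer GTP-U packets carrying a given TEID to one Rx queue. */
static struct rte_flow *
create_gtp_teid_rule(uint16_t port_id, uint16_t rx_queue, uint32_t teid,
		     struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = {
		.teid = rte_cpu_to_be_32(teid),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTP,
		  .spec = &gtp_spec,
		  .mask = &rte_flow_item_gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}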
doc/guides/nics/mlx5.rst
doc/guides/rel_notes/release_20_02.rst
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx_cmds.c
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_prm.h
drivers/net/mlx5/mlx5_rxtx.h

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index b513cfc..66997f1 100644
@@ -90,7 +90,7 @@ Features
 - Statistics query including Basic, Extended and per queue.
 - Rx HW timestamp.
-- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP, Geneve.
+- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP, Geneve, GTP.
 - Tunnel HW offloads: packet type, inner/outer RSS, IP and UDP checksum verification.
 - NIC HW offloads: encapsulation (vxlan, gre, mplsoudp, mplsogre), NAT, routing, TTL
   increment/decrement, count, drop, mark. For details please see :ref:`mlx5_offloads_support`.
@@ -159,6 +159,11 @@ Limitations
 - VF: flow rules created on VF devices can only match traffic targeted at the
   configured MAC addresses (see ``rte_eth_dev_mac_addr_add()``).
 
+- Match on GTP tunnel header item supports the following fields only:
+
+     - msg_type
+     - teid
+
 .. note::
 
    MAC addresses not already present in the bridge table of the associated
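Because the default rte_flow_item_gtp_mask covers the TEID only, matching on
msg_type as well requires an explicit mask restricted to the two fields listed
above (v_pt_rsv_flags and msg_len must stay zero in the mask, matching the
nic_mask used by the validation code below). A minimal sketch with placeholder
values, reusing the includes from the earlier example:

/* Sketch: explicit GTP mask restricted to the two supported fields. */
static const struct rte_flow_item_gtp gtp_mask = {
	.msg_type = 0xff,
	.teid = RTE_BE32(0xffffffff),
};
static const struct rte_flow_item_gtp gtp_spec = {
	.msg_type = 0xff,		/* 0xff is the GTP-U G-PDU message type */
	.teid = RTE_BE32(1234),		/* placeholder TEID */
};
static const struct rte_flow_item gtp_item = {
	.type = RTE_FLOW_ITEM_TYPE_GTP,
	.spec = &gtp_spec,
	.mask = &gtp_mask,
};

gtp_item would then replace the GTP entry in a pattern array such as the one
shown in the earlier sketch.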
@@ -788,6 +793,10 @@ Below are some firmware configurations listed.
 
    FLEX_PARSER_PROFILE_ENABLE=0
 
+- enable GTP flow matching::
+
+   FLEX_PARSER_PROFILE_ENABLE=3
+
 Prerequisites
 -------------
 
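Whether the firmware profile above (and hence the tunnel_stateless_gtp
capability queried by the PMD) actually enables GTP matching can be probed
from the application with rte_flow_validate() before installing rules. A
hedged sketch, with a hypothetical helper name and attr/pattern/actions
arrays like those in the earlier example:

#include <stdio.h>
#include <rte_flow.h>

/* Sketch: return 0 when the port would accept the GTP rule, or a negative
 * errno value (e.g. -ENOTSUP with "GTP support is not enabled") otherwise. */
static int
probe_gtp_matching(uint16_t port_id, const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[])
{
	struct rte_flow_error error = { .message = NULL };
	int ret = rte_flow_validate(port_id, attr, pattern, actions, &error);

	if (ret)
		printf("GTP matching unavailable: %s\n",
		       error.message ? error.message : "(no details)");
	return ret;
}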
diff --git a/doc/guides/rel_notes/release_20_02.rst b/doc/guides/rel_notes/release_20_02.rst
index 2748264..50e2c14 100644
@@ -111,6 +111,7 @@ New Features
   Updated Mellanox mlx5 driver with new features and improvements, including:
 
   * Added support for RSS using L3/L4 source/destination only.
+  * Added support for matching on GTP tunnel header item.
 
 * **Updated testpmd application.**
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ceb6de8..01542e7 100644
@@ -195,6 +195,7 @@ struct mlx5_hca_attr {
        uint32_t vport_inline_mode:3;
        uint32_t tunnel_stateless_geneve_rx:1;
        uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
+       uint32_t tunnel_stateless_gtp:1;
        uint32_t lro_cap:1;
        uint32_t tunnel_lro_gre:1;
        uint32_t tunnel_lro_vxlan:1;
diff --git a/drivers/net/mlx5/mlx5_devx_cmds.c b/drivers/net/mlx5/mlx5_devx_cmds.c
index d6bf156..9985d30 100644
@@ -415,6 +415,9 @@ mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
                             hcattr, max_geneve_opt_len);
        attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
                                         hcattr, wqe_inline_mode);
+       attr->tunnel_stateless_gtp = MLX5_GET
+                                       (per_protocol_networking_offload_caps,
+                                        hcattr, tunnel_stateless_gtp);
        if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return 0;
        if (attr->eth_virt) {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0126cd8..bb1f1f1 100644
@@ -319,6 +319,10 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
                .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
+       {
+               .tunnel = MLX5_FLOW_LAYER_GTP,
+               .ptype = RTE_PTYPE_TUNNEL_GTPU,
+       },
 };
 
 /**
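With the mapping above, Rx packets that hit a GTP flow rule can be reported
with the RTE_PTYPE_TUNNEL_GTPU packet type. A sketch (not part of the patch,
helper name is hypothetical) of how an application might test for it:

#include <stdbool.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* Sketch: check whether a received packet was classified as GTP-U tunneled. */
static inline bool
pkt_is_gtpu(const struct rte_mbuf *m)
{
	return (m->packet_type & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_GTPU;
}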
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e42c98a..a1c7b67 100644
@@ -122,6 +122,9 @@ enum mlx5_feature_name {
 /* Queue items. */
 #define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27)
 
+/* Pattern tunnel Layer bits (continued). */
+#define MLX5_FLOW_LAYER_GTP (1u << 28)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
        (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -136,7 +139,7 @@ enum mlx5_feature_name {
        (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
         MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
         MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
-        MLX5_FLOW_LAYER_GENEVE)
+        MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index c02517a..26dbaaf 100644
@@ -27,6 +27,7 @@
 #include <rte_ip.h>
 #include <rte_gre.h>
 #include <rte_vxlan.h>
+#include <rte_gtp.h>
 
 #include "mlx5.h"
 #include "mlx5_defs.h"
@@ -1551,6 +1552,56 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * Validate GTP item.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
+                         const struct rte_flow_item *item,
+                         uint64_t item_flags,
+                         struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       const struct rte_flow_item_gtp *mask = item->mask;
+       const struct rte_flow_item_gtp nic_mask = {
+               .msg_type = 0xff,
+               .teid = RTE_BE32(0xffffffff),
+       };
+
+       if (!priv->config.hca_attr.tunnel_stateless_gtp)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "GTP support is not enabled");
+       if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "multiple tunnel layers not"
+                                         " supported");
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no outer UDP layer found");
+       if (!mask)
+               mask = &rte_flow_item_gtp_mask;
+       return mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&nic_mask,
+                sizeof(struct rte_flow_item_gtp),
+                error);
+}
+
 /**
  * Validate the pop VLAN action.
  *
@@ -4629,6 +4680,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
                case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
                        break;
+               case RTE_FLOW_ITEM_TYPE_GTP:
+                       ret = flow_dv_validate_item_gtp(dev, items, item_flags,
+                                                       error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_GTP;
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -6337,6 +6395,57 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
                 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
 }
 
+/**
+ * Add GTP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_gtp(void *matcher, void *key,
+                          const struct rte_flow_item *item, int inner)
+{
+       const struct rte_flow_item_gtp *gtp_m = item->mask;
+       const struct rte_flow_item_gtp *gtp_v = item->spec;
+       void *headers_m;
+       void *headers_v;
+       void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                    misc_parameters_3);
+       void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+       uint16_t dport = RTE_GTPU_UDP_PORT;
+
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+       }
+       if (!gtp_v)
+               return;
+       if (!gtp_m)
+               gtp_m = &rte_flow_item_gtp_mask;
+       MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
+       MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
+                gtp_v->msg_type & gtp_m->msg_type);
+       MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
+                rte_be_to_cpu_32(gtp_m->teid));
+       MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
+                rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
+}
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)                                     \
@@ -7515,6 +7624,11 @@ cnt_err:
                                                        items);
                        last_item = MLX5_FLOW_ITEM_TX_QUEUE;
                        break;
+               case RTE_FLOW_ITEM_TYPE_GTP:
+                       flow_dv_translate_item_gtp(match_mask, match_value,
+                                                  items, tunnel);
+                       last_item = MLX5_FLOW_LAYER_GTP;
+                       break;
                default:
                        break;
                }
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index a805363..6ad214b 100644
@@ -682,7 +682,11 @@ struct mlx5_ifc_fte_match_set_misc3_bits {
        u8 icmp_code[0x8];
        u8 icmpv6_type[0x8];
        u8 icmpv6_code[0x8];
-       u8 reserved_at_120[0xe0];
+       u8 reserved_at_120[0x20];
+       u8 gtpu_teid[0x20];
+       u8 gtpu_msg_type[0x08];
+       u8 gtpu_msg_flags[0x08];
+       u8 reserved_at_170[0x90];
 };
 
 /* Flow matcher. */
@@ -1235,7 +1239,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
        u8 swp[0x1];
        u8 swp_csum[0x1];
        u8 swp_lso[0x1];
-       u8 reserved_at_23[0xd];
+       u8 reserved_at_23[0x8];
+       u8 tunnel_stateless_gtp[0x1];
+       u8 reserved_at_25[0x4];
        u8 max_vxlan_udp_ports[0x8];
        u8 reserved_at_38[0x6];
        u8 max_geneve_opt_len[0x1];
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 24fa038..3f659d2 100644
@@ -40,7 +40,7 @@
 #include "mlx5_glue.h"
 
 /* Support tunnel matching. */
-#define MLX5_FLOW_TUNNEL 9
+#define MLX5_FLOW_TUNNEL 10
 
 struct mlx5_rxq_stats {
 #ifdef MLX5_PMD_SOFT_COUNTERS