net/mlx5: add Direct Verbs translate items
author Ori Kam <orika@mellanox.com>
Mon, 24 Sep 2018 23:17:47 +0000 (23:17 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Thu, 11 Oct 2018 16:53:49 +0000 (18:53 +0200)
This commit handles the translation of the requested flow into the
Direct Verbs API.

Direct Verbs introduces a matcher object, which acts as a shared mask
for all flows that use the same mask. This commit therefore translates
each item into a matcher and the value that should be matched.
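
For illustration only (not part of the patch), a minimal sketch of the
mask/value split the matcher relies on, with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>

    /* A matcher stores the mask once; every flow using the same mask
     * shares it and carries only its own pre-masked value. */
    struct toy_matcher { uint8_t mask[16]; int refcnt; };

    /* The value must be in the range of the mask, hence the AND. */
    void toy_set_value(const struct toy_matcher *m, const uint8_t *spec,
                       uint8_t *value)
    {
            size_t i;

            for (i = 0; i < sizeof(m->mask); ++i)
                    value[i] = spec[i] & m->mask[i];
    }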

Signed-off-by: Ori Kam <orika@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c
drivers/net/mlx5/mlx5_prm.h

index 6c30466..69523ab 100644
@@ -444,6 +444,42 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
        return 0;
 }
 
+/**
+ * Adjust the hash fields according to the @p flow information.
+ *
+ * @param[in] dev_flow
+ *   Pointer to the mlx5_flow.
+ * @param[in] tunnel
+ *   1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ *   ETH_RSS_* types.
+ * @param[in] hash_fields
+ *   Item hash fields.
+ *
+ * @return
+ *   The hash fields that should be used.
+ */
+uint64_t
+mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+                           int tunnel __rte_unused, uint32_t layer_types,
+                           uint64_t hash_fields)
+{
+       struct rte_flow *flow = dev_flow->flow;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       int rss_request_inner = flow->rss.level >= 2;
+
+       /* Check RSS hash level for tunnel. */
+       if (tunnel && rss_request_inner)
+               hash_fields |= IBV_RX_HASH_INNER;
+       else if (tunnel || rss_request_inner)
+               return 0;
+#endif
+       /* Check if requested layer matches RSS hash fields. */
+       if (!(flow->rss.types & layer_types))
+               return 0;
+       return hash_fields;
+}
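
As a standalone illustration of the logic above (hypothetical names,
assuming tunnel support is compiled in):

    #include <stdint.h>

    #define TOY_RX_HASH_INNER (1ULL << 31) /* stand-in for IBV_RX_HASH_INNER */

    uint64_t
    toy_hashfields_adjust(int tunnel, int rss_level, uint64_t rss_types,
                          uint32_t layer_types, uint64_t hash_fields)
    {
            int rss_request_inner = rss_level >= 2;

            if (tunnel && rss_request_inner)
                    hash_fields |= TOY_RX_HASH_INNER; /* hash on inner headers */
            else if (tunnel || rss_request_inner)
                    return 0; /* tunnel/level mismatch for this layer */
            if (!(rss_types & layer_types))
                    return 0; /* requested RSS types do not cover this layer */
            return hash_fields;
    }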
+
 /**
  * Lookup and set the ptype in the data Rx part.  A single Ptype can be used,
  * if several tunnel rules are used on this queue, the tunnel ptype will be
index e5c6bf3..1e4209a 100644
 #define IPPROTO_MPLS 137
 #endif
 
+/* UDP port numbers for VxLAN. */
+#define MLX5_UDP_PORT_VXLAN 4789
+#define MLX5_UDP_PORT_VXLAN_GPE 4790
+
 /* Priority reserved for default flows. */
 #define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
 
 #define MLX5_PRIORITY_MAP_L4 0
 #define MLX5_PRIORITY_MAP_MAX 3
 
+/* Valid layer type for IPV4 RSS. */
+#define MLX5_IPV4_LAYER_TYPES \
+       (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+        ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
+        ETH_RSS_NONFRAG_IPV4_OTHER)
+
+/* IBV hash source bits for IPV4. */
+#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
+
+/* Valid layer type for IPV6 RSS. */
+#define MLX5_IPV6_LAYER_TYPES \
+       (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
+        ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
+        ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+
+/* IBV hash source bits for IPV6. */
+#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
+
 /* Max number of actions per DV flow. */
 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
@@ -228,6 +250,9 @@ struct mlx5_flow_driver_ops {
 
 /* mlx5_flow.c */
 
+uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
+                                    uint32_t layer_types,
+                                    uint64_t hash_fields);
 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
                                   uint32_t subpriority);
 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
index 960d0eb..a49e627 100644
@@ -337,6 +337,781 @@ flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
        return flow;
 }
 
+/**
+ * Add Ethernet item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_eth(void *matcher, void *key,
+                          const struct rte_flow_item *item, int inner)
+{
+       const struct rte_flow_item_eth *eth_m = item->mask;
+       const struct rte_flow_item_eth *eth_v = item->spec;
+       const struct rte_flow_item_eth nic_mask = {
+               .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+               .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+               .type = RTE_BE16(0xffff),
+       };
+       void *headers_m;
+       void *headers_v;
+       char *l24_v;
+       unsigned int i;
+
+       if (!eth_v)
+               return;
+       if (!eth_m)
+               eth_m = &nic_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+              &eth_m->dst, sizeof(eth_m->dst));
+       /* The value must be in the range of the mask. */
+       l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+       for (i = 0; i < sizeof(eth_m->dst); ++i)
+               l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
+       memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+              &eth_m->src, sizeof(eth_m->src));
+       l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+       /* The value must be in the range of the mask. */
+       for (i = 0; i < sizeof(eth_m->src); ++i)
+               l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+                rte_be_to_cpu_16(eth_m->type));
+       l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
+       *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+}
+
+/**
+ * Add VLAN item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vlan(void *matcher, void *key,
+                           const struct rte_flow_item *item,
+                           int inner)
+{
+       const struct rte_flow_item_vlan *vlan_m = item->mask;
+       const struct rte_flow_item_vlan *vlan_v = item->spec;
+       const struct rte_flow_item_vlan nic_mask = {
+               .tci = RTE_BE16(0x0fff),
+               .inner_type = RTE_BE16(0xffff),
+       };
+       void *headers_m;
+       void *headers_v;
+       uint16_t tci_m;
+       uint16_t tci_v;
+
+       if (!vlan_v)
+               return;
+       if (!vlan_m)
+               vlan_m = &nic_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       tci_m = rte_be_to_cpu_16(vlan_m->tci);
+       tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+}
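
The shifts above follow the 802.1Q TCI layout, PCP(3) | DEI/CFI(1) |
VID(12); a small self-check, illustrative only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t tci = 0xa123; /* prio = 5, cfi = 0, vid = 0x123 */

            assert((tci & 0x0fff) == 0x123); /* first_vid  */
            assert(((tci >> 12) & 1) == 0);  /* first_cfi  */
            assert((tci >> 13) == 5);        /* first_prio */
            return 0;
    }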
+
+/**
+ * Add IPV4 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv4(void *matcher, void *key,
+                           const struct rte_flow_item *item,
+                           int inner)
+{
+       const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
+       const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
+       const struct rte_flow_item_ipv4 nic_mask = {
+               .hdr = {
+                       .src_addr = RTE_BE32(0xffffffff),
+                       .dst_addr = RTE_BE32(0xffffffff),
+                       .type_of_service = 0xff,
+                       .next_proto_id = 0xff,
+               },
+       };
+       void *headers_m;
+       void *headers_v;
+       char *l24_m;
+       char *l24_v;
+       uint8_t tos;
+
+       if (!ipv4_v)
+               return;
+       if (!ipv4_m)
+               ipv4_m = &nic_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
+       l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+                            dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+       l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                            dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+       *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
+       *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
+       l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+                         src_ipv4_src_ipv6.ipv4_layout.ipv4);
+       l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                         src_ipv4_src_ipv6.ipv4_layout.ipv4);
+       *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
+       *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
+       tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
+                ipv4_m->hdr.type_of_service);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
+                ipv4_m->hdr.type_of_service >> 2);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+                ipv4_m->hdr.next_proto_id);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+                ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
+}
+
+/**
+ * Add IPV6 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv6(void *matcher, void *key,
+                           const struct rte_flow_item *item,
+                           int inner)
+{
+       const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
+       const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
+       const struct rte_flow_item_ipv6 nic_mask = {
+               .hdr = {
+                       .src_addr =
+                               "\xff\xff\xff\xff\xff\xff\xff\xff"
+                               "\xff\xff\xff\xff\xff\xff\xff\xff",
+                       .dst_addr =
+                               "\xff\xff\xff\xff\xff\xff\xff\xff"
+                               "\xff\xff\xff\xff\xff\xff\xff\xff",
+                       .vtc_flow = RTE_BE32(0xffffffff),
+                       .proto = 0xff,
+                       .hop_limits = 0xff,
+               },
+       };
+       void *headers_m;
+       void *headers_v;
+       void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+       void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+       char *l24_m;
+       char *l24_v;
+       uint32_t vtc_m;
+       uint32_t vtc_v;
+       int i;
+       int size;
+
+       if (!ipv6_v)
+               return;
+       if (!ipv6_m)
+               ipv6_m = &nic_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       size = sizeof(ipv6_m->hdr.dst_addr);
+       l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+                            dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+       l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                            dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+       memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
+       for (i = 0; i < size; ++i)
+               l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
+       l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+                            src_ipv4_src_ipv6.ipv6_layout.ipv6);
+       l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                            src_ipv4_src_ipv6.ipv6_layout.ipv6);
+       memcpy(l24_m, ipv6_m->hdr.src_addr, size);
+       for (i = 0; i < size; ++i)
+               l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
+       /* TOS. */
+       vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
+       vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
+       /* Label. */
+       if (inner) {
+               MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
+                        vtc_m);
+               MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
+                        vtc_v);
+       } else {
+               MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
+                        vtc_m);
+               MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
+                        vtc_v);
+       }
+       /* Protocol. */
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+                ipv6_m->hdr.proto);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+                ipv6_v->hdr.proto & ipv6_m->hdr.proto);
+}
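
The TOS/label shifts follow the IPv6 vtc_flow layout, version(4) |
traffic class(8) | flow label(20), with DSCP in the upper six bits of
the traffic class and ECN in the lower two; a small self-check,
illustrative only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t vtc = (6u << 28) | (0x2eu << 22) | (1u << 20) | 0xabcde;

            assert(((vtc >> 22) & 0x3f) == 0x2e); /* ip_dscp */
            assert(((vtc >> 20) & 0x3) == 1);     /* ip_ecn */
            assert((vtc & 0xfffff) == 0xabcde);   /* flow label */
            return 0;
    }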
+
+/**
+ * Add TCP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_tcp(void *matcher, void *key,
+                          const struct rte_flow_item *item,
+                          int inner)
+{
+       const struct rte_flow_item_tcp *tcp_m = item->mask;
+       const struct rte_flow_item_tcp *tcp_v = item->spec;
+       void *headers_m;
+       void *headers_v;
+
+       if (!tcp_v)
+               return;
+       if (!tcp_m)
+               tcp_m = &rte_flow_item_tcp_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
+                rte_be_to_cpu_16(tcp_m->hdr.src_port));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
+                rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
+                rte_be_to_cpu_16(tcp_m->hdr.dst_port));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
+                rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
+}
+
+/**
+ * Add UDP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_udp(void *matcher, void *key,
+                          const struct rte_flow_item *item,
+                          int inner)
+{
+       const struct rte_flow_item_udp *udp_m = item->mask;
+       const struct rte_flow_item_udp *udp_v = item->spec;
+       void *headers_m;
+       void *headers_v;
+
+       if (!udp_v)
+               return;
+       if (!udp_m)
+               udp_m = &rte_flow_item_udp_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
+                rte_be_to_cpu_16(udp_m->hdr.src_port));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+                rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+                rte_be_to_cpu_16(udp_m->hdr.dst_port));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
+}
+
+/**
+ * Add GRE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_gre(void *matcher, void *key,
+                          const struct rte_flow_item *item,
+                          int inner)
+{
+       const struct rte_flow_item_gre *gre_m = item->mask;
+       const struct rte_flow_item_gre *gre_v = item->spec;
+       void *headers_m;
+       void *headers_v;
+       void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+       void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+
+       if (!gre_v)
+               return;
+       if (!gre_m)
+               gre_m = &rte_flow_item_gre_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
+       MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+                rte_be_to_cpu_16(gre_m->protocol));
+       MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+                rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+}
+
+/**
+ * Add NVGRE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_nvgre(void *matcher, void *key,
+                            const struct rte_flow_item *item,
+                            int inner)
+{
+       const struct rte_flow_item_nvgre *nvgre_m = item->mask;
+       const struct rte_flow_item_nvgre *nvgre_v = item->spec;
+       void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+       void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+       char *gre_key_m;
+       char *gre_key_v;
+       int size;
+       int i;
+
+       if (!nvgre_v)
+               return;
+       if (!nvgre_m)
+               nvgre_m = &rte_flow_item_nvgre_mask;
+       size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
+       gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
+       gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
+       memcpy(gre_key_m, nvgre_m->tni, size);
+       for (i = 0; i < size; ++i)
+               gre_key_v[i] = gre_key_m[i] & ((const char *)(nvgre_v->tni))[i];
+       flow_dv_translate_item_gre(matcher, key, item, inner);
+}
+
+/**
+ * Add VXLAN item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vxlan(void *matcher, void *key,
+                            const struct rte_flow_item *item,
+                            int inner)
+{
+       const struct rte_flow_item_vxlan *vxlan_m = item->mask;
+       const struct rte_flow_item_vxlan *vxlan_v = item->spec;
+       void *headers_m;
+       void *headers_v;
+       void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+       void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+       char *vni_m;
+       char *vni_v;
+       uint16_t dport;
+       int size;
+       int i;
+
+       if (!vxlan_v)
+               return;
+       if (!vxlan_m)
+               vxlan_m = &rte_flow_item_vxlan_mask;
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
+               MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
+       if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+       }
+       size = sizeof(vxlan_m->vni);
+       vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
+       vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
+       memcpy(vni_m, vxlan_m->vni, size);
+       for (i = 0; i < size; ++i)
+               vni_v[i] = vni_m[i] & vxlan_v->vni[i];
+}
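
The UDP destination port is pinned only when the preceding UDP item
left it unmatched; a standalone sketch of that rule (the ports are the
IANA defaults behind MLX5_UDP_PORT_VXLAN and MLX5_UDP_PORT_VXLAN_GPE):

    #include <stdint.h>

    uint16_t
    toy_vxlan_dport(int is_gpe, uint16_t already_matched_dport)
    {
            if (already_matched_dport)
                    return already_matched_dport; /* explicit UDP item wins */
            return is_gpe ? 4790 : 4789; /* implied by the VXLAN item type */
    }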
+
+/**
+ * Update the matcher and the value based on the selected item.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in, out] dev_flow
+ *   Pointer to the mlx5_flow.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_create_item(void *matcher, void *key,
+                   const struct rte_flow_item *item,
+                   struct mlx5_flow *dev_flow,
+                   int inner)
+{
+       struct mlx5_flow_dv_matcher *tmatcher = matcher;
+
+       switch (item->type) {
+       case RTE_FLOW_ITEM_TYPE_VOID:
+       case RTE_FLOW_ITEM_TYPE_END:
+               break;
+       case RTE_FLOW_ITEM_TYPE_ETH:
+               flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
+                                          inner);
+               tmatcher->priority = MLX5_PRIORITY_MAP_L2;
+               break;
+       case RTE_FLOW_ITEM_TYPE_VLAN:
+               flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
+                                           inner);
+               break;
+       case RTE_FLOW_ITEM_TYPE_IPV4:
+               flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
+                                           inner);
+               tmatcher->priority = MLX5_PRIORITY_MAP_L3;
+               dev_flow->dv.hash_fields |=
+                       mlx5_flow_hashfields_adjust(dev_flow, inner,
+                                                   MLX5_IPV4_LAYER_TYPES,
+                                                   MLX5_IPV4_IBV_RX_HASH);
+               break;
+       case RTE_FLOW_ITEM_TYPE_IPV6:
+               flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
+                                           inner);
+               tmatcher->priority = MLX5_PRIORITY_MAP_L3;
+               dev_flow->dv.hash_fields |=
+                       mlx5_flow_hashfields_adjust(dev_flow, inner,
+                                                   MLX5_IPV6_LAYER_TYPES,
+                                                   MLX5_IPV6_IBV_RX_HASH);
+               break;
+       case RTE_FLOW_ITEM_TYPE_TCP:
+               flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
+                                          inner);
+               tmatcher->priority = MLX5_PRIORITY_MAP_L4;
+               dev_flow->dv.hash_fields |=
+                       mlx5_flow_hashfields_adjust(dev_flow, inner,
+                                                   ETH_RSS_TCP,
+                                                   (IBV_RX_HASH_SRC_PORT_TCP |
+                                                    IBV_RX_HASH_DST_PORT_TCP));
+               break;
+       case RTE_FLOW_ITEM_TYPE_UDP:
+               flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
+                                          inner);
+               tmatcher->priority = MLX5_PRIORITY_MAP_L4;
+               dev_flow->dv.hash_fields |=
+                       mlx5_flow_hashfields_adjust(dev_flow, inner,
+                                                   ETH_RSS_UDP,
+                                                   (IBV_RX_HASH_SRC_PORT_UDP |
+                                                    IBV_RX_HASH_DST_PORT_UDP));
+               break;
+       case RTE_FLOW_ITEM_TYPE_NVGRE:
+               flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
+                                            inner);
+               break;
+       case RTE_FLOW_ITEM_TYPE_GRE:
+               flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
+                                          inner);
+               break;
+       case RTE_FLOW_ITEM_TYPE_VXLAN:
+       case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+               flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
+                                            inner);
+               break;
+       default:
+               break;
+       }
+}
+
+static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
+
+#define HEADER_IS_ZERO(match_criteria, headers)                                     \
+       !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
+                matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
+
+/**
+ * Calculate flow matcher enable bitmap.
+ *
+ * @param match_criteria
+ *   Pointer to flow matcher criteria.
+ *
+ * @return
+ *   Bitmap of enabled fields.
+ */
+static uint8_t
+flow_dv_matcher_enable(uint32_t *match_criteria)
+{
+       uint8_t match_criteria_enable;
+
+       match_criteria_enable =
+               (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
+               MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
+       match_criteria_enable |=
+               (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
+               MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
+       match_criteria_enable |=
+               (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
+               MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
+       match_criteria_enable |=
+               (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
+               MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
+
+       return match_criteria_enable;
+}
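
For example (illustrative), a rule matching outer ETH/IPV4 plus a VXLAN
VNI leaves inner_headers zero, so only the outer and misc bits are set:

    /* outer_headers != 0, misc_parameters != 0, inner_headers == 0:
     * enable = (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT)
     *        | (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT)
     *        = 0x1 | 0x2 = 0x3
     */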
+
+/**
+ * Register the flow matcher.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] matcher
+ *   Pointer to flow matcher.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_matcher_register(struct rte_eth_dev *dev,
+                        struct mlx5_flow_dv_matcher *matcher,
+                        struct mlx5_flow *dev_flow,
+                        struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct mlx5_flow_dv_matcher *cache_matcher;
+       struct mlx5dv_flow_matcher_attr dv_attr = {
+               .type = IBV_FLOW_ATTR_NORMAL,
+               .match_mask = (void *)&matcher->mask,
+       };
+
+       /* Lookup from cache. */
+       LIST_FOREACH(cache_matcher, &priv->matchers, next) {
+               if (matcher->crc == cache_matcher->crc &&
+                   matcher->priority == cache_matcher->priority &&
+                   matcher->egress == cache_matcher->egress &&
+                   !memcmp((const void *)matcher->mask.buf,
+                           (const void *)cache_matcher->mask.buf,
+                           cache_matcher->mask.size)) {
+                       DRV_LOG(DEBUG,
+                               "priority %hd use %s matcher %p: refcnt %d++",
+                               cache_matcher->priority,
+                               cache_matcher->egress ? "tx" : "rx",
+                               (void *)cache_matcher,
+                               rte_atomic32_read(&cache_matcher->refcnt));
+                       rte_atomic32_inc(&cache_matcher->refcnt);
+                       dev_flow->dv.matcher = cache_matcher;
+                       return 0;
+               }
+       }
+       /* Register new matcher. */
+       cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
+       if (!cache_matcher)
+               return rte_flow_error_set(error, ENOMEM,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                         "cannot allocate matcher memory");
+       *cache_matcher = *matcher;
+       dv_attr.match_criteria_enable =
+               flow_dv_matcher_enable(cache_matcher->mask.buf);
+       dv_attr.priority = matcher->priority;
+       if (matcher->egress)
+               dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
+       cache_matcher->matcher_object =
+               mlx5dv_create_flow_matcher(priv->ctx, &dv_attr);
+       if (!cache_matcher->matcher_object)
+               return rte_flow_error_set(error, ENOMEM,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, "cannot create matcher");
+       rte_atomic32_inc(&cache_matcher->refcnt);
+       LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
+       dev_flow->dv.matcher = cache_matcher;
+       DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
+               cache_matcher->priority,
+               cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
+               rte_atomic32_read(&cache_matcher->refcnt));
+       return 0;
+}
+
+/**
+ * Fill the flow with DV spec.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ *   Pointer to the sub flow.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+                 struct mlx5_flow *dev_flow,
+                 const struct rte_flow_attr *attr,
+                 const struct rte_flow_item items[],
+                 const struct rte_flow_action actions[] __rte_unused,
+                 struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+       uint64_t priority = attr->priority;
+       struct mlx5_flow_dv_matcher matcher = {
+               .mask = {
+                       .size = sizeof(matcher.mask.buf),
+               },
+       };
+       void *match_value = dev_flow->dv.value.buf;
+       uint8_t inner = 0;
+
+       if (priority == MLX5_FLOW_PRIO_RSVD)
+               priority = priv->config.flow_prio - 1;
+       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
+               flow_dv_create_item(&matcher, match_value, items, dev_flow,
+                                   inner);
+       matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+                                    matcher.mask.size);
+       matcher.priority = mlx5_flow_adjust_priority(dev, priority,
+                                                    matcher.priority);
+       matcher.egress = attr->egress;
+       if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+               return -rte_errno;
+       return 0;
+}
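
A hypothetical pattern that exercises this path when an application
calls rte_flow_create() (illustrative only; ETH lands in outer_headers,
the masked IPv4 destination in dst_ipv4_dst_ipv6, and UDP pins
ip_protocol):

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static const struct rte_flow_attr attr = { .ingress = 1 };
    static const struct rte_flow_item_ipv4 ip_spec = {
            .hdr = { .dst_addr = RTE_BE32(0xc0a80001) }, /* 192.168.0.1 */
    };
    static const struct rte_flow_item_ipv4 ip_mask = {
            .hdr = { .dst_addr = RTE_BE32(0xffffffff) },
    };
    static const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip_spec, .mask = &ip_mask },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };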
+
 /**
  * Fills the flow_ops with the function pointers.
  *
@@ -349,7 +1124,7 @@ mlx5_flow_dv_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops)
        *flow_ops = (struct mlx5_flow_driver_ops) {
                .validate = flow_dv_validate,
                .prepare = flow_dv_prepare,
-               .translate = NULL,
+               .translate = flow_dv_translate,
                .apply = NULL,
                .remove = NULL,
                .destroy = NULL,
index b9f475d..bf27b45 100644
@@ -132,37 +132,6 @@ flow_verbs_spec_add(struct mlx5_flow *flow, void *src, unsigned int size)
        verbs->size += size;
 }
 
-/**
- * Adjust verbs hash fields according to the @p flow information.
- *
- * @param[in] dev_flow.
- *   Pointer to dev flow structure.
- * @param[in] tunnel
- *   1 when the hash field is for a tunnel item.
- * @param[in] layer_types
- *   ETH_RSS_* types.
- * @param[in] hash_fields
- *   Item hash fields.
- */
-static void
-flow_verbs_hashfields_adjust(struct mlx5_flow *dev_flow,
-                            int tunnel __rte_unused,
-                            uint32_t layer_types, uint64_t hash_fields)
-{
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       int rss_request_inner = dev_flow->flow->rss.level >= 2;
-
-       hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
-       if (rss_request_inner && !tunnel)
-               hash_fields = 0;
-       else if (rss_request_inner < 2 && tunnel)
-               hash_fields = 0;
-#endif
-       if (!(dev_flow->flow->rss.types & layer_types))
-               hash_fields = 0;
-       dev_flow->verbs.hash_fields |= hash_fields;
-}
-
 /**
  * Convert the @p item into a Verbs specification. This function assumes that
  * the input is valid and that there is space to insert the requested item
@@ -347,13 +316,10 @@ flow_verbs_translate_item_ipv4(const struct rte_flow_item *item,
                ipv4.val.proto &= ipv4.mask.proto;
                ipv4.val.tos &= ipv4.mask.tos;
        }
-       flow_verbs_hashfields_adjust(dev_flow, tunnel,
-                                    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                                     ETH_RSS_NONFRAG_IPV4_TCP |
-                                     ETH_RSS_NONFRAG_IPV4_UDP |
-                                     ETH_RSS_NONFRAG_IPV4_OTHER),
-                                    (IBV_RX_HASH_SRC_IPV4 |
-                                     IBV_RX_HASH_DST_IPV4));
+       dev_flow->verbs.hash_fields |=
+               mlx5_flow_hashfields_adjust(dev_flow, tunnel,
+                                           MLX5_IPV4_LAYER_TYPES,
+                                           MLX5_IPV4_IBV_RX_HASH);
        dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
        flow_verbs_spec_add(dev_flow, &ipv4, size);
 }
@@ -427,16 +393,10 @@ flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
                ipv6.val.next_hdr &= ipv6.mask.next_hdr;
                ipv6.val.hop_limit &= ipv6.mask.hop_limit;
        }
-       flow_verbs_hashfields_adjust(dev_flow, tunnel,
-                                    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                                     ETH_RSS_NONFRAG_IPV6_TCP |
-                                     ETH_RSS_NONFRAG_IPV6_UDP |
-                                     ETH_RSS_IPV6_EX  |
-                                     ETH_RSS_IPV6_TCP_EX |
-                                     ETH_RSS_IPV6_UDP_EX |
-                                     ETH_RSS_NONFRAG_IPV6_OTHER),
-                                    (IBV_RX_HASH_SRC_IPV6 |
-                                     IBV_RX_HASH_DST_IPV6));
+       dev_flow->verbs.hash_fields |=
+               mlx5_flow_hashfields_adjust(dev_flow, tunnel,
+                                           MLX5_IPV6_LAYER_TYPES,
+                                           MLX5_IPV6_IBV_RX_HASH);
        dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
        flow_verbs_spec_add(dev_flow, &ipv6, size);
 }
@@ -480,10 +440,10 @@ flow_verbs_translate_item_udp(const struct rte_flow_item *item,
                udp.val.src_port &= udp.mask.src_port;
                udp.val.dst_port &= udp.mask.dst_port;
        }
-       flow_verbs_hashfields_adjust(dev_flow,
-                                    tunnel, ETH_RSS_UDP,
-                                    (IBV_RX_HASH_SRC_PORT_UDP |
-                                     IBV_RX_HASH_DST_PORT_UDP));
+       dev_flow->verbs.hash_fields |=
+               mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_UDP,
+                                           (IBV_RX_HASH_SRC_PORT_UDP |
+                                            IBV_RX_HASH_DST_PORT_UDP));
        dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
        flow_verbs_spec_add(dev_flow, &udp, size);
 }
@@ -527,10 +487,10 @@ flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
                tcp.val.src_port &= tcp.mask.src_port;
                tcp.val.dst_port &= tcp.mask.dst_port;
        }
-       flow_verbs_hashfields_adjust(dev_flow,
-                                    tunnel, ETH_RSS_TCP,
-                                    (IBV_RX_HASH_SRC_PORT_TCP |
-                                     IBV_RX_HASH_DST_PORT_TCP));
+       dev_flow->verbs.hash_fields |=
+               mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_TCP,
+                                           (IBV_RX_HASH_SRC_PORT_TCP |
+                                            IBV_RX_HASH_DST_PORT_TCP));
        dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
        flow_verbs_spec_add(dev_flow, &tcp, size);
 }
index 5b34d0d..69296a0 100644
@@ -493,6 +493,13 @@ struct mlx5_ifc_fte_match_param_bits {
        u8 reserved_at_800[0x800];
 };
 
+enum {
+       MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
+       MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
+       MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
+       MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT
+};
+
 /* CQE format mask. */
 #define MLX5E_CQE_FORMAT_MASK 0xc