diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index b8475ab..534cd93 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -42,7 +42,6 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
 #endif
-extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
 
 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
@@ -52,7 +51,6 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
 #endif
-       [MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
 };
@@ -1037,7 +1035,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "egress is not supported");
-       if (attributes->transfer)
+       if (attributes->transfer && !priv->config.dv_esw_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL, "transfer is not supported");
@@ -1049,6 +1047,110 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
        return 0;
 }
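
With this change a transfer rule passes attribute validation only when
E-Switch DV support (priv->config.dv_esw_en) is enabled. A hedged example of
an attribute block that now reaches the DV driver instead of being rejected
(field values are illustrative):

        const struct rte_flow_attr attr = {
                .group = 0,
                .priority = 0,
                .ingress = 1,
                .transfer = 1, /* Accepted only with dv_esw_en set. */
        };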
 
+/**
+ * Validate ICMP6 item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
+                              uint64_t item_flags,
+                              uint8_t target_protocol,
+                              struct rte_flow_error *error)
+{
+       const struct rte_flow_item_icmp6 *mask = item->mask;
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+       const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+       const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                     MLX5_FLOW_LAYER_OUTER_L4;
+       int ret;
+
+       if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with ICMP6 layer");
+       if (!(item_flags & l3m))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "IPv6 is mandatory to filter on"
+                                         " ICMP6");
+       if (item_flags & l4m)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "multiple L4 layers not supported");
+       if (!mask)
+               mask = &rte_flow_item_icmp6_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_icmp6_mask,
+                sizeof(struct rte_flow_item_icmp6), error);
+       if (ret < 0)
+               return ret;
+       return 0;
+}
+
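
A minimal sketch of how a driver validation loop might call the new helper
while walking the pattern; the loop shape, the next_protocol bookkeeping and
the MLX5_FLOW_LAYER_ICMP6 flag update are modeled on the other
mlx5_flow_validate_item_*() callers and are assumptions, not part of this
hunk:

        uint64_t item_flags = 0;
        uint8_t next_protocol = 0xff; /* 0xff: next header not pinned yet. */
        int ret;

        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ICMP6:
                        ret = mlx5_flow_validate_item_icmp6(items, item_flags,
                                                            next_protocol,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_ICMP6;
                        break;
                /* ... other item types update item_flags/next_protocol. */
                default:
                        break;
                }
        }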
+/**
+ * Validate ICMP item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
+                            uint64_t item_flags,
+                            uint8_t target_protocol,
+                            struct rte_flow_error *error)
+{
+       const struct rte_flow_item_icmp *mask = item->mask;
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+       const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+       const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                     MLX5_FLOW_LAYER_OUTER_L4;
+       int ret;
+
+       if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with ICMP layer");
+       if (!(item_flags & l3m))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "IPv4 is mandatory to filter"
+                                         " on ICMP");
+       if (item_flags & l4m)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "multiple L4 layers not supported");
+       if (!mask)
+               mask = &rte_flow_item_icmp_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_icmp_mask,
+                sizeof(struct rte_flow_item_icmp), error);
+       if (ret < 0)
+               return ret;
+       return 0;
+}
+
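
On the application side, a pattern that this validator accepts must carry an
IPv4 item before the ICMP item; a hedged example (values are illustrative,
and a NULL mask falls back to rte_flow_item_icmp_mask, i.e. ICMP type and
code only):

        struct rte_flow_item_icmp icmp_spec = {
                .hdr = {
                        .icmp_type = 8, /* Echo request. */
                        .icmp_code = 0,
                },
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_ICMP, .spec = &icmp_spec },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };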
 /**
  * Validate Ethernet item.
  *
@@ -1294,7 +1396,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
  * @param[in] target_protocol
  *   The next protocol in the previous item.
  * @param[in] flow_mask
- *   mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask.
+ *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1694,19 +1796,20 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused,
                   const struct rte_flow_attr *attr __rte_unused,
                   const struct rte_flow_item items[] __rte_unused,
                   const struct rte_flow_action actions[] __rte_unused,
-                  struct rte_flow_error *error __rte_unused)
+                  struct rte_flow_error *error)
 {
-       rte_errno = ENOTSUP;
-       return -rte_errno;
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
 }
 
 static struct mlx5_flow *
 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
                  const struct rte_flow_item items[] __rte_unused,
                  const struct rte_flow_action actions[] __rte_unused,
-                 struct rte_flow_error *error __rte_unused)
+                 struct rte_flow_error *error)
 {
-       rte_errno = ENOTSUP;
+       rte_flow_error_set(error, ENOTSUP,
+                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
        return NULL;
 }
 
@@ -1716,19 +1819,19 @@ flow_null_translate(struct rte_eth_dev *dev __rte_unused,
                    const struct rte_flow_attr *attr __rte_unused,
                    const struct rte_flow_item items[] __rte_unused,
                    const struct rte_flow_action actions[] __rte_unused,
-                   struct rte_flow_error *error __rte_unused)
+                   struct rte_flow_error *error)
 {
-       rte_errno = ENOTSUP;
-       return -rte_errno;
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
 }
 
 static int
 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
                struct rte_flow *flow __rte_unused,
-               struct rte_flow_error *error __rte_unused)
+               struct rte_flow_error *error)
 {
-       rte_errno = ENOTSUP;
-       return -rte_errno;
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
 }
 
 static void
@@ -1748,10 +1851,10 @@ flow_null_query(struct rte_eth_dev *dev __rte_unused,
                struct rte_flow *flow __rte_unused,
                const struct rte_flow_action *actions __rte_unused,
                void *data __rte_unused,
-               struct rte_flow_error *error __rte_unused)
+               struct rte_flow_error *error)
 {
-       rte_errno = ENOTSUP;
-       return -rte_errno;
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
 }
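
The stubs above used to set rte_errno by hand and leave *error untouched;
rte_flow_error_set() fills the error structure, stores the code in rte_errno
and returns the negated code, so callers still get -ENOTSUP but now with a
populated error object. Its effect in these stubs is roughly:

        if (error)
                *error = (struct rte_flow_error){
                        .type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        .cause = NULL,
                        .message = NULL,
                };
        rte_errno = ENOTSUP;
        return -ENOTSUP; /* rte_flow_error_set() returns -code. */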
 
 /* Void driver to protect from null pointer reference. */
@@ -1783,9 +1886,9 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
        struct mlx5_priv *priv = dev->data->dev_private;
        enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
 
-       if (attr->transfer && !priv->config.dv_esw_en)
-               type = MLX5_FLOW_TYPE_TCF;
-       else
+       if (attr->transfer && priv->config.dv_esw_en)
+               type = MLX5_FLOW_TYPE_DV;
+       if (!attr->transfer)
                type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
                                                 MLX5_FLOW_TYPE_VERBS;
        return type;
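
With the TCF path removed, a transfer rule resolves to MLX5_FLOW_TYPE_DV
only when dv_esw_en is set; otherwise type stays MLX5_FLOW_TYPE_MAX and
dispatch lands on the null driver. A sketch of how the resolved type selects
the ops table (mirroring the flow_drv_ops[] array at the top of this diff):

        const struct mlx5_flow_driver_ops *fops =
                flow_drv_ops[flow_get_drv_type(dev, attr)];

        /* MLX5_FLOW_TYPE_MAX maps to mlx5_flow_null_drv_ops, whose
         * callbacks now report ENOTSUP through rte_flow_error_set().
         */
        ret = fops->validate(dev, attr, items, actions, error);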
@@ -1832,7 +1935,7 @@ flow_drv_validate(struct rte_eth_dev *dev,
  * initializes the device flow and returns the pointer.
  *
  * @note
- *   This function initializes device flow structure such as dv, tcf or verbs in
+ *   This function initializes device flow structure such as dv or verbs in
 *   struct mlx5_flow. However, it is the caller's responsibility to initialize
 *   the rest. For example, adding the returned device flow to the
 *   flow->dev_flows list and setting a backward reference to the flow should
 *   be done outside this function.
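
A sketch of the caller-side bookkeeping this note asks for, following the
shape of flow_list_create() later in this file (hedged; not part of this
hunk):

        dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
        if (!dev_flow)
                goto error;
        dev_flow->flow = flow; /* Backward reference to the parent flow. */
        LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);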
@@ -2092,8 +2195,13 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
        else
                flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
        flow = rte_calloc(__func__, 1, flow_size, 0);
+       if (!flow) {
+               rte_errno = ENOMEM;
+               return NULL;
+       }
        flow->drv_type = flow_get_drv_type(dev, attr);
        flow->ingress = attr->ingress;
+       flow->transfer = attr->transfer;
        assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
               flow->drv_type < MLX5_FLOW_TYPE_MAX);
        flow->queue = (void *)(flow + 1);
@@ -2155,7 +2263,7 @@ mlx5_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
 {
-       struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        return flow_list_create(dev, &priv->flows,
                                attr, items, actions, error);
@@ -2547,13 +2655,13 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-               attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+               attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
                        .src_addr = input->flow.ip4_flow.src_ip,
                        .dst_addr = input->flow.ip4_flow.dst_ip,
                        .time_to_live = input->flow.ip4_flow.ttl,
                        .type_of_service = input->flow.ip4_flow.tos,
                };
-               attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+               attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
                        .src_addr = mask->ipv4_mask.src_ip,
                        .dst_addr = mask->ipv4_mask.dst_ip,
                        .time_to_live = mask->ipv4_mask.ttl,
@@ -2569,7 +2677,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-               attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+               attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
                        .hop_limits = input->flow.ipv6_flow.hop_limits,
                        .proto = input->flow.ipv6_flow.proto,
                };
@@ -2601,11 +2709,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
        /* Handle L4. */
        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-               attributes->l4.udp.hdr = (struct udp_hdr){
+               attributes->l4.udp.hdr = (struct rte_udp_hdr){
                        .src_port = input->flow.udp4_flow.src_port,
                        .dst_port = input->flow.udp4_flow.dst_port,
                };
-               attributes->l4_mask.udp.hdr = (struct udp_hdr){
+               attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
@@ -2616,11 +2724,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
                };
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-               attributes->l4.tcp.hdr = (struct tcp_hdr){
+               attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = input->flow.tcp4_flow.src_port,
                        .dst_port = input->flow.tcp4_flow.dst_port,
                };
-               attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+               attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
@@ -2631,11 +2739,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
                };
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-               attributes->l4.udp.hdr = (struct udp_hdr){
+               attributes->l4.udp.hdr = (struct rte_udp_hdr){
                        .src_port = input->flow.udp6_flow.src_port,
                        .dst_port = input->flow.udp6_flow.dst_port,
                };
-               attributes->l4_mask.udp.hdr = (struct udp_hdr){
+               attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
@@ -2646,11 +2754,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
                };
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-               attributes->l4.tcp.hdr = (struct tcp_hdr){
+               attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = input->flow.tcp6_flow.src_port,
                        .dst_port = input->flow.tcp6_flow.dst_port,
                };
-               attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+               attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
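
The remaining hunks are the mechanical side of DPDK's namespace cleanup:
struct ipv4_hdr, ipv6_hdr, udp_hdr and tcp_hdr gain the rte_ prefix while
their fields keep the same names and layout; e.g., assuming the 19.08-era
rte_udp.h:

        struct rte_udp_hdr {
                rte_be16_t src_port;    /* UDP source port. */
                rte_be16_t dst_port;    /* UDP destination port. */
                rte_be16_t dgram_len;   /* UDP datagram length. */
                rte_be16_t dgram_cksum; /* UDP datagram checksum. */
        } __attribute__((__packed__));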