net/mlx5: fix MPLS item validation
author    Dekel Peled <dekelp@mellanox.com>
          Thu, 15 Nov 2018 15:17:13 +0000 (17:17 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Fri, 16 Nov 2018 09:45:37 +0000 (10:45 +0100)

Update the mlx5_flow_validate_item_mpls() function to allow
MPLS over IP, UDP, and GRE.
Modify the flow_dv_validate() function with the new logic introduced
in the previous patch of this series: set the new variable last_item
after each item validation, and fold it into item_flags at the end of
each item iteration. The new variable last_item is passed to
mlx5_flow_validate_item_mpls() and used to validate the MPLS item.
The same change is implemented in flow_verbs_validate().
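
For illustration only, not part of this patch: a minimal sketch of a pattern
that the relaxed check now admits (MPLS over GRE; MPLS over outer L3 or UDP
is accepted the same way). port_id is an assumed, already configured mlx5
port, and the helper name is hypothetical.

#include <stdint.h>
#include <rte_flow.h>

/* Sketch only: validate an MPLS-over-GRE pattern. With this change the
 * MPLS item is accepted because the preceding GRE item leaves
 * MLX5_FLOW_LAYER_GRE in prev_layer when the MPLS item is validated.
 */
static int
validate_mpls_over_gre(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		{ .type = RTE_FLOW_ITEM_TYPE_MPLS },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* 0 if the PMD accepts the rule, a negative errno value otherwise
	 * (e.g. when mpls_en is disabled in the firmware configuration). */
	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}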

Update the mlx5_flow_validate_item_mpls() function to verify that
the device configuration has mpls_en set to 1.
This check was added earlier this year, but was unintentionally removed
in recent versions.

Fixes: 84c406e74524 ("net/mlx5: add flow translate function")

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 01499c4..c8fad5f 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1593,12 +1593,14 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 /**
  * Validate MPLS item.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- *   The next protocol in the previous item.
+ * @param[in] prev_layer
+ *   The protocol layer indicated in previous item.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1606,16 +1608,27 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
+                            const struct rte_flow_item *item __rte_unused,
                             uint64_t item_flags __rte_unused,
-                            uint8_t target_protocol __rte_unused,
+                            uint64_t prev_layer __rte_unused,
                             struct rte_flow_error *error)
 {
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
        const struct rte_flow_item_mpls *mask = item->mask;
+       struct priv *priv = dev->data->dev_private;
        int ret;
 
-       if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
+       if (!priv->config.mpls_en)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "MPLS not supported or"
+                                         " disabled in firmware"
+                                         " configuration.");
+       /* MPLS over IP, UDP, GRE is allowed */
+       if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
+                           MLX5_FLOW_LAYER_OUTER_L4_UDP |
+                           MLX5_FLOW_LAYER_GRE)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index dd954d8..4a7c052 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -385,9 +385,10 @@ int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                 uint64_t item_flags,
                                 struct rte_flow_error *error);
-int mlx5_flow_validate_item_mpls(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
+                                const struct rte_flow_item *item,
                                 uint64_t item_flags,
-                                uint8_t target_protocol,
+                                uint64_t prev_layer,
                                 struct rte_flow_error *error);
 int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
                                uint64_t item_flags,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b9293bd..1f31874 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -775,6 +775,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
+       uint64_t last_item = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;
@@ -794,24 +795,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-                                              MLX5_FLOW_LAYER_OUTER_L2;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                            MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
-                                              MLX5_FLOW_LAYER_OUTER_VLAN;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+                                            MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
@@ -831,8 +832,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
@@ -855,8 +856,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                 error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
@@ -864,8 +865,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
@@ -873,14 +874,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_GRE;
+                       last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN;
+                       last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
@@ -888,28 +889,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                                error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+                       last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
-                       ret = mlx5_flow_validate_item_mpls(items, item_flags,
-                                                          next_protocol,
-                                                          error);
+                       ret = mlx5_flow_validate_item_mpls(dev, items,
+                                                          item_flags,
+                                                          last_item, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_MPLS;
+                       last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        ret = flow_dv_validate_item_meta(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_ITEM_METADATA;
+                       last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
+               item_flags |= last_item;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 13fbf24..81ec59d 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1018,6 +1018,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
+       uint64_t last_item = 0;
        uint8_t next_protocol = 0xff;
 
        if (items == NULL)
@@ -1037,26 +1038,26 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-                                              MLX5_FLOW_LAYER_OUTER_L2;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                            MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
-                                               MLX5_FLOW_LAYER_INNER_VLAN) :
-                                              (MLX5_FLOW_LAYER_OUTER_L2 |
-                                               MLX5_FLOW_LAYER_OUTER_VLAN);
+                       last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+                                             MLX5_FLOW_LAYER_INNER_VLAN) :
+                                            (MLX5_FLOW_LAYER_OUTER_L2 |
+                                             MLX5_FLOW_LAYER_OUTER_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
@@ -1076,8 +1077,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
@@ -1098,8 +1099,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
@@ -1109,15 +1110,15 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                 error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN;
+                       last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
@@ -1125,28 +1126,29 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                                dev, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+                       last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_GRE;
+                       last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
-                       ret = mlx5_flow_validate_item_mpls(items, item_flags,
-                                                          next_protocol,
-                                                          error);
+                       ret = mlx5_flow_validate_item_mpls(dev, items,
+                                                          item_flags,
+                                                          last_item, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_MPLS;
+                       last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
+               item_flags |= last_item;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {