net/mlx5: validate flow rule item order
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 6ae98e1..578d003 100644
@@ -127,7 +127,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
                         MLX5_EXPANSION_OUTER_IPV4_TCP,
-                        MLX5_EXPANSION_GRE),
+                        MLX5_EXPANSION_GRE,
+                        MLX5_EXPANSION_IPV4,
+                        MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
@@ -145,7 +147,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
-                        MLX5_EXPANSION_OUTER_IPV6_TCP),
+                        MLX5_EXPANSION_OUTER_IPV6_TCP,
+                        MLX5_EXPANSION_IPV4,
+                        MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
@@ -302,6 +306,14 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
                .tunnel = MLX5_FLOW_LAYER_NVGRE,
                .ptype = RTE_PTYPE_TUNNEL_NVGRE,
        },
+       {
+               .tunnel = MLX5_FLOW_LAYER_IPIP,
+               .ptype = RTE_PTYPE_TUNNEL_IP,
+       },
+       {
+               .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
+               .ptype = RTE_PTYPE_TUNNEL_IP,
+       },
 };
 
 /**
@@ -684,6 +696,29 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
        }
 }
 
+/**
+ * Return a pointer to the requested action in a list of actions.
+ *
+ * @param[in] actions
+ *   The list of actions to search in.
+ * @param[in] action
+ *   The action type to find.
+ *
+ * @return
+ *   Pointer to the action in the list, if found. NULL otherwise.
+ */
+const struct rte_flow_action *
+mlx5_flow_find_action(const struct rte_flow_action *actions,
+                     enum rte_flow_action_type action)
+{
+       if (actions == NULL)
+               return NULL;
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
+               if (actions->type == action)
+                       return actions;
+       return NULL;
+}
+
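
A minimal usage sketch for the new helper (hypothetical caller, not part of
this patch; it assumes the declaration lives in mlx5_flow.h alongside the
other mlx5_flow_* helpers):

#include <stdbool.h>
#include <rte_flow.h>
#include "mlx5_flow.h"

/* Hypothetical helper: true when the rule carries a QUEUE fate action. */
static bool
rule_has_queue_action(const struct rte_flow_action *actions)
{
	return mlx5_flow_find_action(actions,
				     RTE_FLOW_ACTION_TYPE_QUEUE) != NULL;
}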
 /*
  * Validate the flag action.
  *
@@ -805,7 +840,8 @@ mlx5_flow_validate_action_drop(uint64_t action_flags,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and mark in same flow");
-       if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+       if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+                           MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
@@ -1188,6 +1224,11 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L2 layers not supported");
+       if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "inner L2 layer should not "
+                                         "follow inner L3 layers");
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
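
For illustration, a hypothetical pattern that the new ETH check rejects: an
inner ETH item placed after an inner L3 item inside a VXLAN tunnel (a sketch
using only rte_flow.h types, not taken from the patch):

#include <rte_flow.h>

/* Inner ETH after inner IPV4: fails with "inner L2 layer should not
 * follow inner L3 layers". */
static const struct rte_flow_item bad_inner_l2[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* inner L3 */
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner L2 after L3: rejected */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};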
@@ -1234,6 +1275,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
        const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                        MLX5_FLOW_LAYER_OUTER_VLAN;
 
+       const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                     MLX5_FLOW_LAYER_OUTER_L2;
        if (item_flags & vlanm)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1242,6 +1285,10 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L2 layer cannot follow L3/L4 layer");
+       else if ((item_flags & l2m) == 0)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no L2 layer before VLAN");
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
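
Similarly, a hypothetical pattern starting with a VLAN item and no preceding
ETH item now fails the new l2m check with "no L2 layer before VLAN" (sketch):

/* VLAN item with no L2 item before it: rejected. */
static const struct rte_flow_item bad_vlan[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};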
@@ -1354,6 +1401,10 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an NVGRE layer.");
+       else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no L2 layer before IPV4");
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        else if (mask->hdr.next_proto_id != 0 &&
@@ -1445,6 +1496,10 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an NVGRE layer.");
+       else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no L2 layer before IPV6");
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
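
The IPV4 and IPV6 checks mirror each other: an outer L3 item with no outer
ETH item before it is rejected with "no L2 layer before IPV4" or "no L2
layer before IPV6". A hypothetical rejected pattern (sketch):

/* Outer IPV6 with no outer L2 item before it: rejected. */
static const struct rte_flow_item bad_outer_l3[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};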
@@ -1977,6 +2032,7 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused,
                   const struct rte_flow_attr *attr __rte_unused,
                   const struct rte_flow_item items[] __rte_unused,
                   const struct rte_flow_action actions[] __rte_unused,
+                  bool external __rte_unused,
                   struct rte_flow_error *error)
 {
        return rte_flow_error_set(error, ENOTSUP,
@@ -2089,6 +2145,8 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
  *   Pointer to the list of items.
  * @param[in] actions
  *   Pointer to the list of actions.
+ * @param[in] external
+ *   This flow rule is created by a request external to the PMD.
  * @param[out] error
  *   Pointer to the error structure.
  *
@@ -2100,13 +2158,13 @@ flow_drv_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
-                 struct rte_flow_error *error)
+                 bool external, struct rte_flow_error *error)
 {
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
 
        fops = flow_get_drv_ops(type);
-       return fops->validate(dev, attr, items, actions, error);
+       return fops->validate(dev, attr, items, actions, external, error);
 }
 
 /**
@@ -2278,7 +2336,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 {
        int ret;
 
-       ret = flow_drv_validate(dev, attr, items, actions, error);
+       ret = flow_drv_validate(dev, attr, items, actions, true, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2340,6 +2398,8 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
  *   Pattern specification (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by a request external to the PMD.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -2351,7 +2411,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
-                struct rte_flow_error *error)
+                bool external, struct rte_flow_error *error)
 {
        struct rte_flow *flow = NULL;
        struct mlx5_flow *dev_flow;
@@ -2365,7 +2425,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
        uint32_t i;
        uint32_t flow_size;
 
-       ret = flow_drv_validate(dev, attr, items, actions, error);
+       ret = flow_drv_validate(dev, attr, items, actions, external, error);
        if (ret < 0)
                return NULL;
        flow_size = sizeof(struct rte_flow);
@@ -2407,6 +2467,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                if (!dev_flow)
                        goto error;
                dev_flow->flow = flow;
+               dev_flow->external = external;
                LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
                ret = flow_drv_translate(dev, dev_flow, attr,
                                         buf->entry[i].pattern,
@@ -2431,6 +2492,55 @@ error:
        return NULL;
 }
 
+/**
+ * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
+ * incoming packets to table 1.
+ *
+ * Other flow rules, requested for group n, will be created in
+ * e-switch table n+1. A jump action targeting e-switch group n will
+ * be translated to a jump to table n+1.
+ *
+ * Used when working in switchdev mode, to take advantage of the
+ * capabilities of tables 1 and above.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   Pointer to flow on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow *
+mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
+{
+       const struct rte_flow_attr attr = {
+               .group = 0,
+               .priority = 0,
+               .ingress = 1,
+               .egress = 0,
+               .transfer = 1,
+       };
+       const struct rte_flow_item pattern = {
+               .type = RTE_FLOW_ITEM_TYPE_END,
+       };
+       struct rte_flow_action_jump jump = {
+               .group = 1,
+       };
+       const struct rte_flow_action actions[] = {
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_JUMP,
+                       .conf = &jump,
+               },
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_END,
+               },
+       };
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_flow_error error;
+
+       return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
+                               actions, false, &error);
+}
+
 /**
  * Create a flow.
  *
@@ -2447,7 +2557,7 @@ mlx5_flow_create(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
 
        return flow_list_create(dev, &priv->flows,
-                               attr, items, actions, error);
+                               attr, items, actions, true, error);
 }
 
 /**
@@ -2644,7 +2754,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
        flow = flow_list_create(dev, &priv->ctrl_flows,
-                               &attr, items, actions, &error);
+                               &attr, items, actions, false, &error);
        if (!flow)
                return -rte_errno;
        return 0;
@@ -3058,7 +3168,8 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
                goto error;
        }
        flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
-                               fdir_flow->items, fdir_flow->actions, NULL);
+                               fdir_flow->items, fdir_flow->actions, true,
+                               NULL);
        if (!flow)
                goto error;
        assert(!flow->fdir);
@@ -3406,3 +3517,39 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
        pool->raw_hw = NULL;
        sh->cmng.pending_queries--;
 }
+
+/**
+ * Translate the rte_flow group index to HW table value.
+ *
+ * @param[in] attributes
+ *   Pointer to flow attributes.
+ * @param[in] external
+ *   The group belongs to a flow rule created by a request external to the PMD.
+ * @param[in] group
+ *   rte_flow group index value.
+ * @param[out] table
+ *   HW table value.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
+                        uint32_t group, uint32_t *table,
+                        struct rte_flow_error *error)
+{
+       if (attributes->transfer && external) {
+               if (group == UINT32_MAX)
+                       return rte_flow_error_set
+                                               (error, EINVAL,
+                                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+                                                NULL,
+                                                "group index not supported");
+               *table = group + 1;
+       } else {
+               *table = group;
+       }
+       return 0;
+}