net/mlx5: add Altivec Rx
[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
index 78cc06f..d4d956f 100644
@@ -696,6 +696,29 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
        }
 }
 
+/**
+ * Return a pointer to the desired action in the list of actions.
+ *
+ * @param[in] actions
+ *   The list of actions to search the action in.
+ * @param[in] action
+ *   The action to find.
+ *
+ * @return
+ *   Pointer to the action in the list, if found. NULL otherwise.
+ */
+const struct rte_flow_action *
+mlx5_flow_find_action(const struct rte_flow_action *actions,
+                     enum rte_flow_action_type action)
+{
+       if (actions == NULL)
+               return NULL;
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
+               if (actions->type == action)
+                       return actions;
+       return NULL;
+}
+
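As a quick illustration of the new helper, a minimal usage sketch follows; the
action array is hypothetical and only serves to show the lookup.

    /* Hypothetical action list: MARK, then RSS, END-terminated. */
    const struct rte_flow_action_mark mark = { .id = 42 };
    const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_RSS },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    /* Returns a pointer to the RSS entry inside 'actions', or NULL. */
    const struct rte_flow_action *rss =
            mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
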
 /*
  * Validate the flag action.
  *
@@ -1201,6 +1224,11 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L2 layers not supported");
+       if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "inner L2 layer should not "
+                                         "follow inner L3 layers");
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -1247,6 +1275,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
        const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                        MLX5_FLOW_LAYER_OUTER_VLAN;
 
+       const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                     MLX5_FLOW_LAYER_OUTER_L2;
        if (item_flags & vlanm)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1255,6 +1285,10 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L2 layer cannot follow L3/L4 layer");
+       else if ((item_flags & l2m) == 0)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no L2 layer before VLAN");
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -1367,6 +1401,10 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an NVGRE layer.");
+       else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no L2 layer before IPV4");
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        else if (mask->hdr.next_proto_id != 0 &&
@@ -1458,6 +1496,10 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an NVGRE layer.");
+       else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no L2 layer before IPV6");
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -1870,6 +1912,95 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
        return 0;
 }
 
+/**
+ * Validate Geneve item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+
+int
+mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+                              uint64_t item_flags,
+                              struct rte_eth_dev *dev,
+                              struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       const struct rte_flow_item_geneve *spec = item->spec;
+       const struct rte_flow_item_geneve *mask = item->mask;
+       int ret;
+       uint16_t gbhdr;
+       uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+                         MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
+       const struct rte_flow_item_geneve nic_mask = {
+               .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
+               .vni = "\xff\xff\xff",
+               .protocol = RTE_BE16(UINT16_MAX),
+       };
+
+       if (!(priv->config.hca_attr.flex_parser_protocols &
+             MLX5_HCA_FLEX_GENEVE_ENABLED) ||
+           !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L3 Geneve is not enabled by device"
+                                         " parameter and/or not configured in"
+                                         " firmware");
+       if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "multiple tunnel layers not"
+                                         " supported");
+       /*
+        * Verify an outer UDP layer is present, as required by the
+        * Geneve specification.
+        */
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no outer UDP layer found");
+       if (!mask)
+               mask = &rte_flow_item_geneve_mask;
+       ret = mlx5_flow_item_acceptable
+                                 (item, (const uint8_t *)mask,
+                                  (const uint8_t *)&nic_mask,
+                                  sizeof(struct rte_flow_item_geneve), error);
+       if (ret)
+               return ret;
+       if (spec) {
+               gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
+               if (MLX5_GENEVE_VER_VAL(gbhdr) ||
+                    MLX5_GENEVE_CRITO_VAL(gbhdr) ||
+                    MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "Geneve protocol unsupported"
+                                                 " fields are being used");
+               if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
+                       return rte_flow_error_set
+                                       (error, ENOTSUP,
+                                        RTE_FLOW_ERROR_TYPE_ITEM,
+                                        item,
+                                        "Unsupported Geneve options length");
+       }
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+               return rte_flow_error_set
+                                   (error, ENOTSUP,
+                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                    "Geneve tunnel must be fully defined");
+       return 0;
+}
+
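For context, a hedged sketch of a pattern this validation is meant to accept:
the tunnel must be fully defined down to the outer UDP layer. The spec values
below are hypothetical.

    /* Hypothetical pattern: outer eth / ipv4 / udp / geneve. */
    struct rte_flow_item_geneve geneve_spec = {
            .vni = "\x00\x00\x2a",          /* VNI 42 */
            .protocol = RTE_BE16(0x6558),   /* transparent Ethernet bridging */
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GENEVE, .spec = &geneve_spec },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
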
 /**
  * Validate MPLS item.
  *
@@ -1990,6 +2121,7 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused,
                   const struct rte_flow_attr *attr __rte_unused,
                   const struct rte_flow_item items[] __rte_unused,
                   const struct rte_flow_action actions[] __rte_unused,
+                  bool external __rte_unused,
                   struct rte_flow_error *error)
 {
        return rte_flow_error_set(error, ENOTSUP,
@@ -2102,6 +2234,8 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
  *   Pointer to the list of items.
  * @param[in] actions
  *   Pointer to the list of actions.
+ * @param[in] external
+ *   This flow rule is created by a request external to the PMD.
  * @param[out] error
  *   Pointer to the error structure.
  *
@@ -2113,13 +2247,13 @@ flow_drv_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
-                 struct rte_flow_error *error)
+                 bool external, struct rte_flow_error *error)
 {
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
 
        fops = flow_get_drv_ops(type);
-       return fops->validate(dev, attr, items, actions, error);
+       return fops->validate(dev, attr, items, actions, external, error);
 }
 
 /**
@@ -2291,7 +2425,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 {
        int ret;
 
-       ret = flow_drv_validate(dev, attr, items, actions, error);
+       ret = flow_drv_validate(dev, attr, items, actions, true, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2353,6 +2487,8 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
  *   Pattern specification (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by a request external to the PMD.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -2364,7 +2500,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
-                struct rte_flow_error *error)
+                bool external, struct rte_flow_error *error)
 {
        struct rte_flow *flow = NULL;
        struct mlx5_flow *dev_flow;
@@ -2378,7 +2514,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
        uint32_t i;
        uint32_t flow_size;
 
-       ret = flow_drv_validate(dev, attr, items, actions, error);
+       ret = flow_drv_validate(dev, attr, items, actions, external, error);
        if (ret < 0)
                return NULL;
        flow_size = sizeof(struct rte_flow);
@@ -2420,6 +2556,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                if (!dev_flow)
                        goto error;
                dev_flow->flow = flow;
+               dev_flow->external = external;
                LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
                ret = flow_drv_translate(dev, dev_flow, attr,
                                         buf->entry[i].pattern,
@@ -2444,6 +2581,55 @@ error:
        return NULL;
 }
 
+/**
+ * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
+ * incoming packets to table 1.
+ *
+ * Other flow rules, requested for group n, will be created in
+ * e-switch table n+1.
+ * A jump action to e-switch group n is translated to target group n+1.
+ *
+ * Used when working in switchdev mode, to take advantage of table 1
+ * and above.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   Pointer to flow on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow *
+mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
+{
+       const struct rte_flow_attr attr = {
+               .group = 0,
+               .priority = 0,
+               .ingress = 1,
+               .egress = 0,
+               .transfer = 1,
+       };
+       const struct rte_flow_item pattern = {
+               .type = RTE_FLOW_ITEM_TYPE_END,
+       };
+       struct rte_flow_action_jump jump = {
+               .group = 1,
+       };
+       const struct rte_flow_action actions[] = {
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_JUMP,
+                       .conf = &jump,
+               },
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_END,
+               },
+       };
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_flow_error error;
+
+       return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
+                               actions, false, &error);
+}
+
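For illustration, a hedged sketch of how this helper might be invoked from the
device start path when E-Switch (switchdev) mode is active; the wrapper
function and the dv_esw_en check are assumptions, not part of this patch.

    /* Hypothetical start-time hook (illustration only). */
    static int
    esw_install_table_zero_rule(struct rte_eth_dev *dev)
    {
            struct mlx5_priv *priv = dev->data->dev_private;

            if (!priv->config.dv_esw_en)    /* assumed switchdev-mode flag */
                    return 0;
            /* Send every packet hitting e-switch table 0 on to table 1. */
            if (!mlx5_flow_create_esw_table_zero_flow(dev))
                    return -rte_errno;
            return 0;
    }
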
 /**
  * Create a flow.
  *
@@ -2460,7 +2646,7 @@ mlx5_flow_create(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
 
        return flow_list_create(dev, &priv->flows,
-                               attr, items, actions, error);
+                               attr, items, actions, true, error);
 }
 
 /**
@@ -2657,7 +2843,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
        flow = flow_list_create(dev, &priv->ctrl_flows,
-                               &attr, items, actions, &error);
+                               &attr, items, actions, false, &error);
        if (!flow)
                return -rte_errno;
        return 0;
@@ -3071,7 +3257,8 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
                goto error;
        }
        flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
-                               fdir_flow->items, fdir_flow->actions, NULL);
+                               fdir_flow->items, fdir_flow->actions, true,
+                               NULL);
        if (!flow)
                goto error;
        assert(!flow->fdir);
@@ -3419,3 +3606,39 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
        pool->raw_hw = NULL;
        sh->cmng.pending_queries--;
 }
+
+/**
+ * Translate the rte_flow group index to HW table value.
+ *
+ * @param[in] attributes
+ *   Pointer to flow attributes.
+ * @param[in] external
+ *   Value is part of a flow rule created by a request external to the PMD.
+ * @param[in] group
+ *   rte_flow group index value.
+ * @param[out] table
+ *   HW table value.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
+                        uint32_t group, uint32_t *table,
+                        struct rte_flow_error *error)
+{
+       if (attributes->transfer && external) {
+               if (group == UINT32_MAX)
+                       return rte_flow_error_set
+                                               (error, EINVAL,
+                                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+                                                NULL,
+                                                "group index not supported");
+               *table = group + 1;
+       } else {
+               *table = group;
+       }
+       return 0;
+}
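
To make the mapping concrete, a small hedged sketch of the expected results
for a few attribute combinations; the calling code is hypothetical.

    struct rte_flow_error err;
    uint32_t table;

    /* External transfer rule in group 3 lands in e-switch table 4. */
    const struct rte_flow_attr xfer = { .group = 3, .transfer = 1 };
    mlx5_flow_group_to_table(&xfer, true, xfer.group, &table, &err);  /* table == 4 */

    /* PMD-internal rule (external == false) keeps table == group. */
    mlx5_flow_group_to_table(&xfer, false, xfer.group, &table, &err); /* table == 3 */

    /* Non-transfer rules are never shifted, regardless of origin. */
    const struct rte_flow_attr ing = { .group = 3, .ingress = 1 };
    mlx5_flow_group_to_table(&ing, true, ing.group, &table, &err);    /* table == 3 */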