mbuf: add namespace to offload flags
[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
index ffcc031..5d19ef1 100644 (file)
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
        uint64_t rss_types;
        /**<
         * RSS types bit-field associated with this node
-        * (see ETH_RSS_* definitions).
+        * (see RTE_ETH_RSS_* definitions).
         */
        uint64_t node_flags;
        /**<
@@ -298,7 +298,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -560,8 +560,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -569,11 +569,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_MPLS,
                                                  MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -584,8 +584,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_NVGRE),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -593,11 +593,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_MPLS,
                                                  MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -659,32 +659,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                  MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                  MLX5_EXPANSION_IPV6_TCP,
                                                  MLX5_EXPANSION_IPV6_FRAG_EXT),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_IPV6_FRAG_EXT] = {
                .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
@@ -1003,13 +1003,15 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
  *   Pointer to device flow rule attributes.
  * @param[in] subpriority
  *   The priority based on the items.
+ * @param[in] external
+ *   True if the flow rule is created by the application (user flow),
+ *   false if it is created internally by the PMD.
  * @return
  *   The matcher priority of the flow.
  */
 uint16_t
 mlx5_get_matcher_priority(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
-                         uint32_t subpriority)
+                         uint32_t subpriority, bool external)
 {
        uint16_t priority = (uint16_t)attr->priority;
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -1018,6 +1020,9 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,
                if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                        priority = priv->config.flow_prio - 1;
                return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+       } else if (!external && attr->transfer && attr->group == 0 &&
+                  attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
+               return (priv->config.flow_prio - 1) * 3;
        }
        if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
@@ -1095,7 +1100,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1648,14 +1653,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
-       if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-           !(rss->types & ETH_RSS_IP))
+       if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+           !(rss->types & RTE_ETH_RSS_IP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L3 partial RSS requested but L3 RSS"
                                          " type not specified");
-       if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-           !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+       if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+           !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L4 partial RSS requested but L4 RSS"
@@ -3594,6 +3599,8 @@ flow_get_rss_action(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = NULL;
+       struct mlx5_meter_policy_action_container *acg;
+       struct mlx5_meter_policy_action_container *acy;
 
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
@@ -3629,9 +3636,18 @@ flow_get_rss_action(struct rte_eth_dev *dev,
                                        if (!policy)
                                                return NULL;
                                }
-                               if (policy->is_rss)
-                                       rss =
-                               policy->act_cnt[RTE_COLOR_GREEN].rss->conf;
+                               if (policy->is_rss) {
+                                       acg =
+                                       &policy->act_cnt[RTE_COLOR_GREEN];
+                                       acy =
+                                       &policy->act_cnt[RTE_COLOR_YELLOW];
+                                       if (acg->fate_action ==
+                                           MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acg->rss->conf;
+                                       else if (acy->fate_action ==
+                                                MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acy->rss->conf;
+                               }
                        }
                        break;
                }
@@ -6411,8 +6427,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
                 * mlx5_flow_hashfields_adjust() in advance.
                 */
                rss_desc->level = rss->level;
-               /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-               rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+               /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+               rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
        }
        flow->dev_handles = 0;
        if (rss && rss->types) {
@@ -6591,6 +6607,80 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
                                                   actions, false, &error);
 }
 
+/**
+ * Create dedicated flow rules on e-switch table 1 that match the ESW
+ * manager vport and the given sq (Tx queue) number and direct all
+ * matching packets to the peer vport. A companion group 0 rule first
+ * jumps the matching traffic to group 1.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param txq
+ *   Txq index.
+ *
+ * @return
+ *   Flow ID on success, 0 otherwise and rte_errno is set.
+ */
+uint32_t
+mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
+{
+       struct rte_flow_attr attr = {
+               .group = 0,
+               .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
+               .ingress = 1,
+               .egress = 0,
+               .transfer = 1,
+       };
+       struct rte_flow_item_port_id port_spec = {
+               .id = MLX5_PORT_ESW_MGR,
+       };
+       struct mlx5_rte_flow_item_tx_queue txq_spec = {
+               .queue = txq,
+       };
+       struct rte_flow_item pattern[] = {
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
+                       .spec = &port_spec,
+               },
+               {
+                       .type = (enum rte_flow_item_type)
+                               MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+                       .spec = &txq_spec,
+               },
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_END,
+               },
+       };
+       struct rte_flow_action_jump jump = {
+               .group = 1,
+       };
+       struct rte_flow_action_port_id port = {
+               .id = dev->data->port_id,
+       };
+       struct rte_flow_action actions[] = {
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_JUMP,
+                       .conf = &jump,
+               },
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_END,
+               },
+       };
+       struct rte_flow_error error;
+
+       /*
+        * Creates group 0, highest priority jump flow.
+        * Matches txq to bypass kernel packets.
+        */
+       if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
+                            false, &error) == 0)
+               return 0;
+       /* Create group 1, lowest priority redirect flow for txq. */
+       attr.group = 1;
+       actions[0].conf = &port;
+       actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
+       return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
+                               actions, false, &error);
+}
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -7036,7 +7126,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        if (!priv->reta_idx_n || !priv->rxqs_n) {
                return 0;
        }
-       if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+       if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                action_rss.types = 0;
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
@@ -8704,7 +8794,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
                                (error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                NULL, "invalid port configuration");
-               if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+               if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                        ctx->action_rss.types = 0;
                for (i = 0; i != priv->reta_idx_n; ++i)
                        ctx->queue[i] = (*priv->reta_idx)[i];
@@ -9301,7 +9391,7 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 {
        uint64_t ol_flags = m->ol_flags;
        const struct mlx5_flow_tbl_data_entry *tble;
-       const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+       const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 
        if (!is_tunnel_offload_active(dev)) {
                info->flags = 0;