diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 3f5aaa8..90ccb9a 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
 #define VERBS_SPEC_INNER(item_flags) \
        (!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
 
-/* Map of Verbs to Flow priority with 8 Verbs priorities. */
-static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
-       { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
-};
-
-/* Map of Verbs to Flow priority with 16 Verbs priorities. */
-static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
-       { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
-       { 9, 10, 11 }, { 12, 13, 14 },
-};
-
 /* Verbs specification header. */
 struct ibv_spec_header {
        enum ibv_flow_spec_type type;
@@ -50,13 +39,17 @@ struct ibv_spec_header {
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- *
+ * @param[in] vprio
+ *   Expected result variants (candidate Verbs priority counts to probe).
+ * @param[in] vprio_n
+ *   Number of entries in @p vprio array.
  * @return
- *   number of supported flow priority on success, a negative errno
+ *   Number of supported flow priorities on success, a negative errno
  *   value otherwise and rte_errno is set.
  */
-int
-mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+static int
+flow_verbs_discover_priorities(struct rte_eth_dev *dev,
+                              const uint16_t *vprio, int vprio_n)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct {
@@ -79,20 +72,19 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
        };
        struct ibv_flow *flow;
        struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
-       uint16_t vprio[] = { 8, 16 };
        int i;
        int priority = 0;
 
 #if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
        /* If DevX supported, driver must support 16 verbs flow priorities. */
-       priority = RTE_DIM(priority_map_5);
+       priority = 16;
        goto out;
 #endif
        if (!drop->qp) {
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
-       for (i = 0; i != RTE_DIM(vprio); i++) {
+       for (i = 0; i != vprio_n; i++) {
                flow_attr.attr.priority = vprio[i] - 1;
                flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
                if (!flow)
@@ -100,20 +92,6 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
                claim_zero(mlx5_glue->destroy_flow(flow));
                priority = vprio[i];
        }
-       switch (priority) {
-       case 8:
-               priority = RTE_DIM(priority_map_3);
-               break;
-       case 16:
-               priority = RTE_DIM(priority_map_5);
-               break;
-       default:
-               rte_errno = ENOTSUP;
-               DRV_LOG(ERR,
-                       "port %u verbs maximum priority: %d expected 8/16",
-                       dev->data->port_id, priority);
-               return -rte_errno;
-       }
 #if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
 out:
 #endif
@@ -125,37 +103,6 @@ out:
        return priority;
 }
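The probing loop above creates a drop flow at attr.priority = vprio[i] - 1 for each candidate and keeps the largest candidate that succeeds. The candidate list { 8, 16 } and the 8/16-to-levels conversion that used to be hard-coded here now belong with whatever shared code invokes the new driver op. A minimal caller-side sketch, assuming a shared wrapper of roughly this shape (the wrapper name and its location are illustrative, not part of this diff):

#include <rte_common.h>	/* RTE_DIM */
#include <rte_errno.h>

/* Hypothetical shared-code wrapper around the new .discover_priorities op. */
static int
mlx5_shared_discover_priorities(struct rte_eth_dev *dev,
				const struct mlx5_flow_driver_ops *fops)
{
	/* Candidates formerly hard-coded in this file as vprio[] = { 8, 16 }. */
	static const uint16_t vprio[] = { 8, 16 };
	int ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));

	if (ret < 0)
		return ret;
	switch (ret) {
	case 8:
		return 3;	/* Three priority levels, as in priority_map_3. */
	case 16:
		return 5;	/* Five priority levels, as in priority_map_5. */
	default:
		/* Mirrors the 8/16 validation removed from this function. */
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
}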
 
-/**
- * Adjust flow priority based on the highest layer and the request priority.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] priority
- *   The rule base priority.
- * @param[in] subpriority
- *   The priority based on the items.
- *
- * @return
- *   The new priority.
- */
-uint32_t
-mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
-                                  uint32_t subpriority)
-{
-       uint32_t res = 0;
-       struct mlx5_priv *priv = dev->data->dev_private;
-
-       switch (priv->config.flow_prio) {
-       case RTE_DIM(priority_map_3):
-               res = priority_map_3[priority][subpriority];
-               break;
-       case RTE_DIM(priority_map_5):
-               res = priority_map_5[priority][subpriority];
-               break;
-       }
-       return  res;
-}
-
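The fixed maps deleted at the top of the file and mlx5_flow_adjust_priority() removed here together translate a (priority, subpriority) pair into a Verbs priority; a later hunk has the translate path reading priv->sh->flow_max_priority instead of priv->config.flow_prio, so the mapping presumably moves into shared code. For reference, the removed logic as a standalone sketch (the relocation target is an assumption; the tables and the selection logic are taken verbatim from the removed lines):

#include <stdint.h>
#include <rte_common.h>	/* RTE_DIM */

/* Sub-priorities per level; redefined here only to keep the sketch self-contained. */
#define MLX5_PRIORITY_MAP_MAX 3

/* Map of Verbs to flow priority with 8 Verbs priorities (3 levels). */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to flow priority with 16 Verbs priorities (5 levels). */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Same table selection the removed mlx5_flow_adjust_priority() performed. */
static uint32_t
adjust_priority(uint32_t flow_max_priority, int32_t priority,
		uint32_t subpriority)
{
	switch (flow_max_priority) {
	case RTE_DIM(priority_map_3):
		return priority_map_3[priority][subpriority];
	case RTE_DIM(priority_map_5):
		return priority_map_5[priority][subpriority];
	default:
		return 0;
	}
}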
 /**
  * Get Verbs flow counter by index.
  *
@@ -960,6 +907,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
                .size = size,
        };
 #else
+       static const struct rte_flow_item_gre empty_gre = {0,};
        const struct rte_flow_item_gre *spec = item->spec;
        const struct rte_flow_item_gre *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_gre);
@@ -968,17 +916,29 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
                .size = size,
        };
 
-       if (!mask)
-               mask = &rte_flow_item_gre_mask;
-       if (spec) {
-               tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
-               tunnel.val.protocol = spec->protocol;
-               tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
-               tunnel.mask.protocol = mask->protocol;
-               /* Remove unwanted bits from values. */
-               tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+       if (!spec) {
+               spec = &empty_gre;
+               mask = &empty_gre;
+       } else {
+               if (!mask)
+                       mask = &rte_flow_item_gre_mask;
+       }
+       tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+       tunnel.val.protocol = spec->protocol;
+       tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+       tunnel.mask.protocol = mask->protocol;
+       /* Remove unwanted bits from values. */
+       tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+       tunnel.val.key &= tunnel.mask.key;
+       if (tunnel.mask.protocol) {
                tunnel.val.protocol &= tunnel.mask.protocol;
-               tunnel.val.key &= tunnel.mask.key;
+       } else {
+               tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
+               if (tunnel.val.protocol) {
+                       tunnel.mask.protocol = 0xFFFF;
+                       tunnel.val.protocol =
+                               rte_cpu_to_be_16(tunnel.val.protocol);
+               }
        }
 #endif
        if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
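With this rewrite the GRE spec and mask are always filled in (an all-zero item stands in when no spec is given), and when the mask leaves the protocol unspecified the value is derived from the inner layers recorded in item_flags via mlx5_translate_tunnel_etypes(), then byte-swapped and matched with a full 0xFFFF mask. This is also why a later hunk defers the GRE translation until after the item loop: the inner-layer flags must be complete before the protocol can be inferred. A plausible shape for such a helper, assuming the usual inner-layer flags (its actual body is not shown by this diff):

#include <stdint.h>
#include <rte_ether.h>	/* RTE_ETHER_TYPE_* */

/* Sketch only: map the innermost parsed layer to a GRE protocol EtherType. */
static uint16_t
tunnel_etype_from_flags(uint64_t item_flags)
{
	if (item_flags & MLX5_FLOW_LAYER_INNER_L2)
		return RTE_ETHER_TYPE_TEB;	/* Inner Ethernet frame. */
	if (item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
		return RTE_ETHER_TYPE_IPV4;
	if (item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
		return RTE_ETHER_TYPE_IPV6;
	return 0;	/* Nothing known; leave the protocol unmatched. */
}

The helper would return the value in host byte order; the added lines above handle the rte_cpu_to_be_16() conversion and only set the mask when a non-zero protocol was inferred.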
@@ -1723,7 +1683,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
        MLX5_ASSERT(wks);
        rss_desc = &wks->rss_desc;
        if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
-               priority = priv->config.flow_prio - 1;
+               priority = priv->sh->flow_max_priority - 1;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int ret;
 
@@ -1733,12 +1693,12 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        flow_verbs_translate_action_flag(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
-                       dev_flow->handle->mark = 1;
+                       wks->mark = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        flow_verbs_translate_action_mark(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_MARK;
-                       dev_flow->handle->mark = 1;
+                       wks->mark = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        flow_verbs_translate_action_drop(dev_flow, actions);
@@ -1824,7 +1784,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                        if (dev_flow->hash_fields != 0)
                                dev_flow->hash_fields |=
                                        mlx5_flow_hashfields_adjust
-                                       (rss_desc, tunnel, ETH_RSS_TCP,
+                                       (rss_desc, tunnel, RTE_ETH_RSS_TCP,
                                         (IBV_RX_HASH_SRC_PORT_TCP |
                                          IBV_RX_HASH_DST_PORT_TCP));
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1837,7 +1797,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                        if (dev_flow->hash_fields != 0)
                                dev_flow->hash_fields |=
                                        mlx5_flow_hashfields_adjust
-                                       (rss_desc, tunnel, ETH_RSS_UDP,
+                                       (rss_desc, tunnel, RTE_ETH_RSS_UDP,
                                         (IBV_RX_HASH_SRC_PORT_UDP |
                                          IBV_RX_HASH_DST_PORT_UDP));
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
@@ -1856,8 +1816,6 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
-                       flow_verbs_translate_item_gre(dev_flow, items,
-                                                     item_flags);
                        subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
@@ -1873,6 +1831,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                                                  NULL, "item not supported");
                }
        }
+       if (item_flags & MLX5_FLOW_LAYER_GRE)
+               flow_verbs_translate_item_gre(dev_flow, items, item_flags);
        dev_flow->handle->layers = item_flags;
        /* Other members of attr will be ignored. */
        dev_flow->verbs.attr.priority =
@@ -2095,4 +2055,5 @@ const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
        .destroy = flow_verbs_destroy,
        .query = flow_verbs_query,
        .sync_domain = flow_verbs_sync_domain,
+       .discover_priorities = flow_verbs_discover_priorities,
 };