net/mlx5: fix shared RSS and mark actions combination
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7c2b389..50673ce 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -35,7 +35,7 @@
 #include "mlx5_rxtx.h"
 #include "rte_pmd_mlx5.h"
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 
 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
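Note: as I read it, HAVE_IBV_FLOW_DV_SUPPORT is only set when rdma-core exposes the Direct Verbs API, while HAVE_INFINIBAND_VERBS_H marks the presence of infiniband/verbs.h itself; widening the guard lets the DV flow engine also build on targets with no Verbs headers at all (e.g. Windows, where the mlx5_flow_os_* shims back the same calls). A commented sketch of the guard under that assumption:

    /*
     * HAVE_IBV_FLOW_DV_SUPPORT - rdma-core provides the DV API (Linux build).
     * HAVE_INFINIBAND_VERBS_H  - infiniband/verbs.h is available at all; when
     *                            it is not, the build targets a Verbs-less OS
     *                            and the engine relies on the OS shims instead.
     */
    #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
    /* ... Direct Verbs flow engine ... */
    #endif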
@@ -955,7 +955,7 @@ flow_dv_convert_action_set_reg
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
-       MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
+       MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
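The cast keeps both operands of the assertion in the same type: RTE_DIM() yields a size_t, and comparing it directly against an enum value can raise sign/enum comparison warnings on strict builds. A self-contained illustration of the pattern (names here are invented, not the driver's):

    #include <assert.h>

    enum modify_reg { REG_A, REG_B, REG_C };

    static const int reg_to_field[] = { 10, 20, 30 };

    /* Equivalent of RTE_DIM(): element count of an array, a size_t value. */
    #define DIM(a) (sizeof(a) / sizeof((a)[0]))

    static int
    check_reg(enum modify_reg id)
    {
        /* Casting the count back to the enum type keeps the comparison
         * enum-vs-enum rather than enum-vs-size_t. */
        assert(id < (enum modify_reg)DIM(reg_to_field));
        return reg_to_field[id];
    }

    int
    main(void)
    {
        return check_reg(REG_B) == 20 ? 0 : 1;
    }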
@@ -5280,6 +5280,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        } else {
                tunnel = NULL;
        }
+       if (tunnel && priv->representor)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                         "decap not supported "
+                                         "for VF representor");
        grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
                                (dev, tunnel, attr, items, actions);
        ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
@@ -8757,11 +8762,13 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
                                          "for sample");
                goto error;
        }
+       int ret;
+
        cache_resource->normal_path_tbl = tbl;
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
-               cache_resource->default_miss =
-                               mlx5_glue->dr_create_flow_action_default_miss();
-               if (!cache_resource->default_miss) {
+               ret = mlx5_flow_os_create_flow_action_default_miss
+                       (&cache_resource->default_miss);
+               if (ret) {
                        rte_flow_error_set(error, ENOMEM,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                NULL,
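For reference, the Linux flavor of mlx5_flow_os_create_flow_action_default_miss is, judging from the other shims in linux/mlx5_flow_os.h, a thin wrapper over the glue call that normalizes the return convention to 0 on success, which is what the error branch above keys off. A sketch under that assumption (not copied from the tree):

    /* Presumed shape of the Linux shim: create the action through the glue
     * layer and report 0 on success, a negative value on failure. */
    static inline int
    mlx5_flow_os_create_flow_action_default_miss(void **action)
    {
        *action = mlx5_glue->dr_create_flow_action_default_miss();
        return (*action) ? 0 : -1;
    }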
@@ -10562,8 +10569,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
        /* Register matcher. */
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
-       matcher.priority = mlx5_flow_adjust_priority(dev, priority,
-                                                    matcher.priority);
+       matcher.priority = mlx5_os_flow_adjust_priority(dev,
+                                                       priority,
+                                                       matcher.priority);
        /* reserved field no needs to be set to 0 here. */
        tbl_key.domain = attr->transfer;
        tbl_key.direction = attr->egress;
@@ -10672,47 +10680,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
        }
 }
 
-/**
- * Retrieves hash RX queue suitable for the *flow*.
- * If shared action configured for *flow* suitable hash RX queue will be
- * retrieved from attached shared action.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to the sub flow.
- * @param[in] rss_desc
- *   Pointer to the RSS descriptor.
- * @param[out] hrxq
- *   Pointer to retrieved hash RX queue object.
- *
- * @return
- *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
- */
-static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
-                      struct mlx5_flow_rss_desc *rss_desc,
-                      struct mlx5_hrxq **hrxq)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t hrxq_idx;
-
-       if (rss_desc->shared_rss) {
-               hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-                               (dev, rss_desc->shared_rss,
-                                dev_flow->hash_fields,
-                                !!(dev_flow->handle->layers &
-                                   MLX5_FLOW_LAYER_TUNNEL));
-               if (hrxq_idx)
-                       *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-                                              hrxq_idx);
-       } else {
-               *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
-                                            &hrxq_idx);
-       }
-       return hrxq_idx;
-}
-
 /**
  * Apply the flow to the NIC, lock free,
  * (mutex should be acquired by caller).
@@ -10744,11 +10711,6 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
        MLX5_ASSERT(wks);
-       if (rss_desc->shared_rss) {
-               dh = wks->flows[wks->flow_idx - 1].handle;
-               MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
-               dh->rix_srss = rss_desc->shared_rss;
-       }
        for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
                dev_flow = &wks->flows[idx];
                dv = &dev_flow->dv;
@@ -10764,11 +10726,34 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                priv->drop_queue.hrxq->action;
                        }
                } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-                          !dv_h->rix_sample && !dv_h->rix_dest_array) ||
-                           (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
+                          !dv_h->rix_sample && !dv_h->rix_dest_array)) {
+                       struct mlx5_hrxq *hrxq;
+                       uint32_t hrxq_idx;
+
+                       hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+                                                   &hrxq_idx);
+                       if (!hrxq) {
+                               rte_flow_error_set
+                                       (error, rte_errno,
+                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                        "cannot get hash queue");
+                               goto error;
+                       }
+                       dh->rix_hrxq = hrxq_idx;
+                       dv->actions[n++] = hrxq->action;
+               } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                        struct mlx5_hrxq *hrxq = NULL;
-                       uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-                                       (dev, dev_flow, rss_desc, &hrxq);
+                       uint32_t hrxq_idx;
+
+                       hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+                                               rss_desc->shared_rss,
+                                               dev_flow->hash_fields,
+                                               !!(dh->layers &
+                                               MLX5_FLOW_LAYER_TUNNEL));
+                       if (hrxq_idx)
+                               hrxq = mlx5_ipool_get
+                                       (priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                                        hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
@@ -10776,8 +10761,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                         "cannot get hash queue");
                                goto error;
                        }
-                       if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
-                               dh->rix_hrxq = hrxq_idx;
+                       dh->rix_srss = rss_desc->shared_rss;
                        dv->actions[n++] = hrxq->action;
                } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
                        if (!priv->sh->default_miss_action) {
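The per-handle fate handling above appears to be the core of the fix: each sub-flow now resolves its own hash Rx queue, with the queue fate allocating a per-sub-flow hrxq and the shared-RSS fate only looking up the hrxq owned by the shared action and recording rix_srss on the handle, so a rule combining a MARK action with a shared RSS action no longer relies on the last sub-flow being the shared-RSS one. A usage sketch of that action combination, assuming the 20.11-era shared action API (rte_flow_shared_action_create() and RTE_FLOW_ACTION_TYPE_SHARED, later renamed to the indirect action API); the port, queues and pattern below are illustrative only:

    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Hypothetical helper (names invented): one ingress rule that both marks
     * matching packets and spreads them through a shared RSS action. */
    static struct rte_flow *
    mark_plus_shared_rss(uint16_t port_id, struct rte_flow_error *err)
    {
        static uint16_t queues[] = { 0, 1, 2, 3 };
        struct rte_flow_action_rss rss = {
            .types = ETH_RSS_IP,
            .queue_num = RTE_DIM(queues),
            .queue = queues,
        };
        struct rte_flow_action rss_action = {
            .type = RTE_FLOW_ACTION_TYPE_RSS,
            .conf = &rss,
        };
        struct rte_flow_shared_action_conf conf = { .ingress = 1 };
        struct rte_flow_shared_action *shared =
            rte_flow_shared_action_create(port_id, &conf, &rss_action, err);
        struct rte_flow_action_mark mark = { .id = 0xbeef };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = shared },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        if (shared == NULL)
            return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }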
@@ -10819,12 +10803,12 @@ error:
                if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
                        mlx5_hrxq_release(dev, dh->rix_hrxq);
                        dh->rix_hrxq = 0;
+               } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+                       dh->rix_srss = 0;
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
        }
-       if (rss_desc->shared_rss)
-               wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -11092,9 +11076,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
                flow_dv_port_id_action_resource_release(dev,
                                handle->rix_port_id_action);
                break;
-       case MLX5_FLOW_FATE_SHARED_RSS:
-               flow_dv_shared_rss_action_release(dev, handle->rix_srss);
-               break;
        default:
                DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
                break;
@@ -11112,11 +11093,11 @@ flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
        struct mlx5_priv *priv = dev->data->dev_private;
 
        if (cache_resource->verbs_action)
-               claim_zero(mlx5_glue->destroy_flow_action
+               claim_zero(mlx5_flow_os_destroy_flow_action
                                (cache_resource->verbs_action));
        if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
                if (cache_resource->default_miss)
-                       claim_zero(mlx5_glue->destroy_flow_action
+                       claim_zero(mlx5_flow_os_destroy_flow_action
                          (cache_resource->default_miss));
        }
        if (cache_resource->normal_path_tbl)
@@ -11169,7 +11150,7 @@ flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
 
        MLX5_ASSERT(cache_resource->action);
        if (cache_resource->action)
-               claim_zero(mlx5_glue->destroy_flow_action
+               claim_zero(mlx5_flow_os_destroy_flow_action
                                        (cache_resource->action));
        for (; i < cache_resource->num_of_dest; i++)
                flow_dv_sample_sub_actions_release(dev,
@@ -11257,6 +11238,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
        struct mlx5_flow_handle *dev_handle;
        struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t srss = 0;
 
        if (!flow)
                return;
@@ -11301,10 +11283,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                if (dev_handle->dvh.rix_tag)
                        flow_dv_tag_release(dev,
                                            dev_handle->dvh.rix_tag);
-               flow_dv_fate_resource_release(dev, dev_handle);
+               if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
+                       flow_dv_fate_resource_release(dev, dev_handle);
+               else if (!srss)
+                       srss = dev_handle->rix_srss;
                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                           tmp_idx);
        }
+       if (srss)
+               flow_dv_shared_rss_action_release(dev, srss);
 }
 
 /**
@@ -12625,18 +12612,18 @@ flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
        int ret = 0;
 
        if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
-               ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
+               ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
                                                flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
-               ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
+               ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
-               ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
+               ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
                if (ret != 0)
                        return ret;
        }