net/mlx5: fix shared RSS and mark actions combination
author     Suanming Mou <suanmingm@nvidia.com>
           Tue, 15 Dec 2020 03:46:24 +0000 (11:46 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Fri, 8 Jan 2021 15:34:51 +0000 (16:34 +0100)
To allow mbuf mark ID updates in the Rx data path, the PMD has a
mechanism that enables mark reporting on Rx queues according to the
existing rte_flow rules: when a flow with a mark ID and an RSS/QUEUE
action exists, all the relevant Rx queues are enabled to report the
mark ID.
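
For context, an application consumes the reported mark ID from the
received mbuf roughly as in the sketch below (DPDK 20.11 mbuf API;
the helper name is illustrative only):

    #include <rte_mbuf.h>

    /* Illustrative helper: read the mark ID the PMD reported on Rx. */
    static inline uint32_t
    get_flow_mark(const struct rte_mbuf *m)
    {
            /* PKT_RX_FDIR_ID is set when a MARK action matched. */
            if (m->ol_flags & PKT_RX_FDIR_ID)
                    return m->hash.fdir.hi; /* mark ID from the rule */
            return 0; /* no mark reported for this packet */
    }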

When a shared RSS action is combined with a mark action, this
mechanism misses the Rx queue updates.
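
For illustration, the combination that hits this issue can be created
with the DPDK 20.11 shared action API roughly as below; port_id, the
queue list, and the mark ID are placeholders:

    uint16_t queues[] = { 0, 1 };
    struct rte_flow_action_rss rss = {
            .types = ETH_RSS_IP,
            .queue_num = RTE_DIM(queues),
            .queue = queues,
    };
    struct rte_flow_action rss_act = {
            .type = RTE_FLOW_ACTION_TYPE_RSS,
            .conf = &rss,
    };
    struct rte_flow_shared_action_conf conf = { .ingress = 1 };
    struct rte_flow_error error;
    struct rte_flow_shared_action *shared =
            rte_flow_shared_action_create(port_id, &conf, &rss_act,
                                          &error);
    struct rte_flow_action_mark mark = { .id = 0x1234 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = shared },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    /* ...then passed to rte_flow_create(); before this fix, the Rx
     * queues behind the shared RSS were not flagged to report the
     * mark ID.
     */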

This commit handles the shared RSS case in the mechanism too, by
resolving the Rx queues from the shared RSS action's indirection
table as well as from the regular hash Rx queue object.

Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")
Cc: stable@dpdk.org
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow_dv.c

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f110c6b..2a4073c 100644
@@ -1002,17 +1002,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-       struct mlx5_hrxq *hrxq;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
 
-       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-               return;
-       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+       if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+               struct mlx5_hrxq *hrxq;
+
+               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
-       if (!hrxq)
+               if (hrxq)
+                       ind_tbl = hrxq->ind_table;
+       } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+               struct mlx5_shared_action_rss *shared_rss;
+
+               shared_rss = mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                        dev_handle->rix_srss);
+               if (shared_rss)
+                       ind_tbl = shared_rss->ind_tbl;
+       }
+       if (!ind_tbl)
                return;
-       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-               int idx = hrxq->ind_table->queues[i];
+       for (i = 0; i != ind_tbl->queues_n; ++i) {
+               int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
@@ -1084,18 +1096,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-       struct mlx5_hrxq *hrxq;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
 
-       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-               return;
-       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+       if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+               struct mlx5_hrxq *hrxq;
+
+               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
-       if (!hrxq)
+               if (hrxq)
+                       ind_tbl = hrxq->ind_table;
+       } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+               struct mlx5_shared_action_rss *shared_rss;
+
+               shared_rss = mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                        dev_handle->rix_srss);
+               if (shared_rss)
+                       ind_tbl = shared_rss->ind_tbl;
+       }
+       if (!ind_tbl)
                return;
        MLX5_ASSERT(dev->data->dev_started);
-       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-               int idx = hrxq->ind_table->queues[i];
+       for (i = 0; i != ind_tbl->queues_n; ++i) {
+               int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index ce229db..50673ce 100644
@@ -10680,47 +10680,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
        }
 }
 
-/**
- * Retrieves hash RX queue suitable for the *flow*.
- * If shared action configured for *flow* suitable hash RX queue will be
- * retrieved from attached shared action.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to the sub flow.
- * @param[in] rss_desc
- *   Pointer to the RSS descriptor.
- * @param[out] hrxq
- *   Pointer to retrieved hash RX queue object.
- *
- * @return
- *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
- */
-static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
-                      struct mlx5_flow_rss_desc *rss_desc,
-                      struct mlx5_hrxq **hrxq)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t hrxq_idx;
-
-       if (rss_desc->shared_rss) {
-               hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-                               (dev, rss_desc->shared_rss,
-                                dev_flow->hash_fields,
-                                !!(dev_flow->handle->layers &
-                                   MLX5_FLOW_LAYER_TUNNEL));
-               if (hrxq_idx)
-                       *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-                                              hrxq_idx);
-       } else {
-               *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
-                                            &hrxq_idx);
-       }
-       return hrxq_idx;
-}
-
 /**
  * Apply the flow to the NIC, lock free,
  * (mutex should be acquired by caller).
@@ -10752,11 +10711,6 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
        MLX5_ASSERT(wks);
-       if (rss_desc->shared_rss) {
-               dh = wks->flows[wks->flow_idx - 1].handle;
-               MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
-               dh->rix_srss = rss_desc->shared_rss;
-       }
        for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
                dev_flow = &wks->flows[idx];
                dv = &dev_flow->dv;
@@ -10772,11 +10726,34 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                priv->drop_queue.hrxq->action;
                        }
                } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-                          !dv_h->rix_sample && !dv_h->rix_dest_array) ||
-                           (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
+                          !dv_h->rix_sample && !dv_h->rix_dest_array)) {
+                       struct mlx5_hrxq *hrxq;
+                       uint32_t hrxq_idx;
+
+                       hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+                                                   &hrxq_idx);
+                       if (!hrxq) {
+                               rte_flow_error_set
+                                       (error, rte_errno,
+                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                        "cannot get hash queue");
+                               goto error;
+                       }
+                       dh->rix_hrxq = hrxq_idx;
+                       dv->actions[n++] = hrxq->action;
+               } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                        struct mlx5_hrxq *hrxq = NULL;
-                       uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-                                       (dev, dev_flow, rss_desc, &hrxq);
+                       uint32_t hrxq_idx;
+
+                       hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+                                               rss_desc->shared_rss,
+                                               dev_flow->hash_fields,
+                                               !!(dh->layers &
+                                               MLX5_FLOW_LAYER_TUNNEL));
+                       if (hrxq_idx)
+                               hrxq = mlx5_ipool_get
+                                       (priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                                        hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
@@ -10784,8 +10761,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                         "cannot get hash queue");
                                goto error;
                        }
-                       if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
-                               dh->rix_hrxq = hrxq_idx;
+                       dh->rix_srss = rss_desc->shared_rss;
                        dv->actions[n++] = hrxq->action;
                } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
                        if (!priv->sh->default_miss_action) {
@@ -10827,12 +10803,12 @@ error:
                if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
                        mlx5_hrxq_release(dev, dh->rix_hrxq);
                        dh->rix_hrxq = 0;
+               } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+                       dh->rix_srss = 0;
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
        }
-       if (rss_desc->shared_rss)
-               wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -11100,9 +11076,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
                flow_dv_port_id_action_resource_release(dev,
                                handle->rix_port_id_action);
                break;
-       case MLX5_FLOW_FATE_SHARED_RSS:
-               flow_dv_shared_rss_action_release(dev, handle->rix_srss);
-               break;
        default:
                DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
                break;
@@ -11265,6 +11238,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
        struct mlx5_flow_handle *dev_handle;
        struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t srss = 0;
 
        if (!flow)
                return;
@@ -11309,10 +11283,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                if (dev_handle->dvh.rix_tag)
                        flow_dv_tag_release(dev,
                                            dev_handle->dvh.rix_tag);
-               flow_dv_fate_resource_release(dev, dev_handle);
+               if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
+                       flow_dv_fate_resource_release(dev, dev_handle);
+               else if (!srss)
+                       srss = dev_handle->rix_srss;
                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                           tmp_idx);
        }
+       if (srss)
+               flow_dv_shared_rss_action_release(dev, srss);
 }
 
 /**