net/mlx5: fix shared RSS and mark actions combination
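Flows whose fate is a shared RSS action never updated the mark flag on their Rx queues: flow_drv_rxq_flags_set() and flow_drv_rxq_flags_trim() returned early for every fate action other than MLX5_FLOW_FATE_QUEUE, so a MARK action combined with shared RSS was accepted but the mark never reached the received mbufs. Resolve the Rx indirection table from either the queue's hrxq or the shared RSS action and walk its queue list in both cases. The diff below also makes the tag-id range checks in mlx5_flow_get_reg_id() compare in unsigned arithmetic explicitly, and picks up the rename of the mlx5_os_dev_ops* tables to mlx5_dev_ops*.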
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4035090..2a4073c 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -798,7 +798,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                            (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
                skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
-               if (id > (REG_C_7 - start_reg))
+               if (id > (uint32_t)(REG_C_7 - start_reg))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
@@ -814,7 +814,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                 */
                if (skip_mtr_reg && config->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
-                       if (id >= (REG_C_7 - start_reg))
+                       if (id >= (uint32_t)(REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
@@ -1002,17 +1002,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-       struct mlx5_hrxq *hrxq;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
 
-       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-               return;
-       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+       if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+               struct mlx5_hrxq *hrxq;
+
+               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
-       if (!hrxq)
+               if (hrxq)
+                       ind_tbl = hrxq->ind_table;
+       } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+               struct mlx5_shared_action_rss *shared_rss;
+
+               shared_rss = mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                        dev_handle->rix_srss);
+               if (shared_rss)
+                       ind_tbl = shared_rss->ind_tbl;
+       }
+       if (!ind_tbl)
                return;
-       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-               int idx = hrxq->ind_table->queues[i];
+       for (i = 0; i != ind_tbl->queues_n; ++i) {
+               int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
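For context, the combination the title refers to, sketched against the DPDK 20.11 shared-action API (rte_flow_shared_action_create() and RTE_FLOW_ACTION_TYPE_SHARED); the queue list and mark id are illustrative, not from the patch. Before this fix, such a flow spread packets through the shared RSS action, but the mark flag was never set on those queues, so the mark did not reach the received mbufs:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
mark_with_shared_rss(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss_conf = {
		.types = ETH_RSS_IP,
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	const struct rte_flow_action rss = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss_conf,
	};
	const struct rte_flow_shared_action_conf conf = { .ingress = 1 };
	/* The shared fate action the driver tracks through rix_srss. */
	struct rte_flow_shared_action *shared =
		rte_flow_shared_action_create(port_id, &conf, &rss, err);
	const struct rte_flow_action_mark mark = { .id = 42 };
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = shared },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (shared == NULL)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}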
@@ -1084,18 +1096,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-       struct mlx5_hrxq *hrxq;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
 
-       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-               return;
-       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+       if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+               struct mlx5_hrxq *hrxq;
+
+               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
-       if (!hrxq)
+               if (hrxq)
+                       ind_tbl = hrxq->ind_table;
+       } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+               struct mlx5_shared_action_rss *shared_rss;
+
+               shared_rss = mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                        dev_handle->rix_srss);
+               if (shared_rss)
+                       ind_tbl = shared_rss->ind_tbl;
+       }
+       if (!ind_tbl)
                return;
        MLX5_ASSERT(dev->data->dev_started);
-       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-               int idx = hrxq->ind_table->queues[i];
+       for (i = 0; i != ind_tbl->queues_n; ++i) {
+               int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
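The fate-action dispatch is now duplicated verbatim between flow_drv_rxq_flags_set() and flow_drv_rxq_flags_trim(); a follow-up could factor it out along these lines (the helper name and the mlx5_flow_handle parameter type are assumptions, the rest comes from the patch):

static struct mlx5_ind_table_obj *
flow_handle_get_ind_tbl(struct mlx5_priv *priv,
			const struct mlx5_flow_handle *dev_handle)
{
	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq =
			mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				       dev_handle->rix_hrxq);

		return hrxq ? hrxq->ind_table : NULL;
	}
	if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);

		return shared_rss ? shared_rss->ind_tbl : NULL;
	}
	return NULL;
}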
@@ -6135,9 +6159,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
        }
        priv->isolated = !!enable;
        if (enable)
-               dev->dev_ops = &mlx5_os_dev_ops_isolate;
+               dev->dev_ops = &mlx5_dev_ops_isolate;
        else
-               dev->dev_ops = &mlx5_os_dev_ops;
+               dev->dev_ops = &mlx5_dev_ops;
 
        dev->rx_descriptor_status = mlx5_rx_descriptor_status;
        dev->tx_descriptor_status = mlx5_tx_descriptor_status;
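The last hunk picks up the rename of the mlx5_os_dev_ops* tables to mlx5_dev_ops*. Applications reach this code through the generic flow API; a minimal usage sketch:

#include <rte_flow.h>

/* Request isolated mode before the port is started; on mlx5 this lands
 * in mlx5_flow_isolate(), which swaps dev->dev_ops between mlx5_dev_ops
 * and mlx5_dev_ops_isolate as shown above. */
static int
enable_isolated_mode(uint16_t port_id)
{
	struct rte_flow_error err;

	return rte_flow_isolate(port_id, 1, &err);
}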