net/mlx5: fix crash on hash Rx queue handling for drop
authorXiaoyu Min <jackmin@mellanox.com>
Thu, 17 Oct 2019 10:49:46 +0000 (13:49 +0300)
committerFerruh Yigit <ferruh.yigit@intel.com>
Wed, 23 Oct 2019 14:43:10 +0000 (16:43 +0200)
When creating the hrxq for the drop queue, QP creation could fail and
jump to the error handler, which releases the created ind_table by
calling the drop release function. That function takes rte_ethdev as
its only parameter and uses priv->drop_queue.hrxq as the object to release.

Unfortunately, at this point, the hrxq is not allocated and
priv->drop_queue.hrxq is still NULL, which leads to a segfault.

This patch fixes the above by allocating the hrxq first, so that when
an error happens the hrxq is released last.

This patch also releases the other allocated resources in the correct
order, which was missing previously.

Fixes: 78be885295b8 ("net/mlx5: handle drop queues as regular queues")
Cc: stable@dpdk.org
Reported-by: Zengmo Gao <gaozengmo@jd.com>
Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/net/mlx5/mlx5_rxq.c

index 0db065a..f0ab843 100644 (file)
@@ -2536,17 +2536,27 @@ struct mlx5_hrxq *
 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_ind_table_obj *ind_tbl;
-       struct ibv_qp *qp;
-       struct mlx5_hrxq *hrxq;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
+       struct ibv_qp *qp = NULL;
+       struct mlx5_hrxq *hrxq = NULL;
 
        if (priv->drop_queue.hrxq) {
                rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
                return priv->drop_queue.hrxq;
        }
+       hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+       if (!hrxq) {
+               DRV_LOG(WARNING,
+                       "port %u cannot allocate memory for drop queue",
+                       dev->data->port_id);
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       priv->drop_queue.hrxq = hrxq;
        ind_tbl = mlx5_ind_table_obj_drop_new(dev);
        if (!ind_tbl)
-               return NULL;
+               goto error;
+       hrxq->ind_table = ind_tbl;
        qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
                 &(struct ibv_qp_init_attr_ex){
                        .qp_type = IBV_QPT_RAW_PACKET,
@@ -2570,15 +2580,6 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
                rte_errno = errno;
                goto error;
        }
-       hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
-       if (!hrxq) {
-               DRV_LOG(WARNING,
-                       "port %u cannot allocate memory for drop queue",
-                       dev->data->port_id);
-               rte_errno = ENOMEM;
-               goto error;
-       }
-       hrxq->ind_table = ind_tbl;
        hrxq->qp = qp;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
@@ -2587,12 +2588,21 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
                goto error;
        }
 #endif
-       priv->drop_queue.hrxq = hrxq;
        rte_atomic32_set(&hrxq->refcnt, 1);
        return hrxq;
 error:
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       if (hrxq && hrxq->action)
+               mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
+       if (qp)
+               claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
        if (ind_tbl)
                mlx5_ind_table_obj_drop_release(dev);
+       if (hrxq) {
+               priv->drop_queue.hrxq = NULL;
+               rte_free(hrxq);
+       }
        return NULL;
 }