int
mlx5_regex_stop(struct rte_regexdev *dev __rte_unused)
{
+ struct mlx5_regex_priv *priv = dev->data->dev_private;
+ uint32_t i;
+
+ /* Tear down every configured QP (fastpath state, MR cache, HW
+  * QPs and CQs) before releasing the QP array itself.
+  */
+ mlx5_regex_clean_ctrl(dev);
+ rte_free(priv->qps);
+ priv->qps = NULL;
+
+ /* Deregister each engine db umem and free its backing buffer;
+  * rte_free(NULL) is a no-op, so unallocated entries are safe.
+  * NOTE(review): dev is dereferenced above, so the __rte_unused
+  * tag on the parameter is now stale — confirm and drop it.
+  */
+ for (i = 0; i < (priv->nb_engines + MLX5_RXP_EM_COUNT); i++) {
+ if (priv->db[i].umem.umem)
+ mlx5_glue->devx_umem_dereg(priv->db[i].umem.umem);
+ rte_free(priv->db[i].ptr);
+ priv->db[i].ptr = NULL;
+ }
+
return 0;
}
/* mlx5_regex_control.c */
int mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
const struct rte_regexdev_qp_conf *cfg);
+void mlx5_regex_clean_ctrl(struct rte_regexdev *dev);
/* mlx5_regex_fastpath.c */
int mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id);
rte_free(qp->qps);
return ret;
}
+
+/*
+ * Release the control-path resources of every configured queue pair
+ * of the given regex device: fastpath state, the MR cache btree, the
+ * hardware QP objects and the completion queue.
+ *
+ * @param dev
+ *   RegEx device whose private data holds the QP array.
+ *
+ * No-op when the QP array was never allocated; a QP whose jobs array
+ * is NULL was never set up (or was already torn down) and is skipped.
+ */
+void
+mlx5_regex_clean_ctrl(struct rte_regexdev *dev)
+{
+ struct mlx5_regex_priv *priv = dev->data->dev_private;
+ struct mlx5_regex_qp *qp;
+ int qp_ind;
+ int i;
+
+ if (!priv->qps)
+ return;
+ for (qp_ind = 0; qp_ind < priv->nb_queues; qp_ind++) {
+ qp = &priv->qps[qp_ind];
+ /* Check if mlx5_regex_qp_setup() was called for this QP */
+ if (!qp->jobs)
+ continue;
+ mlx5_regexdev_teardown_fastpath(priv, qp_ind);
+ mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
+ /* Destroy each HW QP object, then the QP's CQ. */
+ for (i = 0; i < qp->nb_obj; i++)
+ regex_ctrl_destroy_hw_qp(qp, i);
+ regex_ctrl_destroy_cq(&qp->cq);
+ }
+}
err = setup_buffers(priv, qp);
if (err) {
rte_free(qp->jobs);
+ qp->jobs = NULL;
return err;
}
struct mlx5_regex_qp *qp = &priv->qps[qp_id];
uint32_t i;
- if (qp) {
+ if (qp->jobs) {
for (i = 0; i < qp->nb_desc; i++) {
if (qp->jobs[i].imkey)
claim_zero(mlx5_devx_cmd_destroy
(qp->jobs[i].imkey));
}
free_buffers(qp);
- if (qp->jobs)
- rte_free(qp->jobs);
+ rte_free(qp->jobs);
+ qp->jobs = NULL;
}
}
return 0;
tidyup_error:
for (i = 0; i < (priv->nb_engines + MLX5_RXP_EM_COUNT); i++) {
- if (priv->db[i].ptr)
- rte_free(priv->db[i].ptr);
if (priv->db[i].umem.umem)
mlx5_glue->devx_umem_dereg(priv->db[i].umem.umem);
+ rte_free(priv->db[i].ptr);
+ priv->db[i].ptr = NULL;
}
return -ret;
}