1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
9 #include <rte_memory.h>
10 #include <rte_malloc.h>
11 #include <rte_regexdev.h>
12 #include <rte_regexdev_core.h>
13 #include <rte_regexdev_driver.h>
16 #include <mlx5_common.h>
17 #include <mlx5_glue.h>
18 #include <mlx5_devx_cmds.h>
20 #include <mlx5_common_os.h>
21 #include <mlx5_common_devx.h>
23 #include "mlx5_regex.h"
24 #include "mlx5_regex_utils.h"
/* Number of RegEx WQEs that fit in one 4KB page (WQE basic block is 64B). */
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)

/*
 * In UMR mode each descriptor needs a UMR WQE in addition to the RegEx
 * WQE, quadrupling the WQE count, hence "+ 2" on the log2 ring size.
 */
#define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
	((has_umr) ? ((log_desc) + 2) : (log_desc))

/**
 * Returns the number of qp obj to be created.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of obj to be created.
 */
static uint32_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	/* Ceiling division: one extra object for any partial page of WQEs. */
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}
/**
 * Destroy the CQ object.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
/* Release the DevX CQ resources. */
mlx5_devx_cq_destroy(&cq->cq_obj);
/* Zero the wrapper so a stale CQ handle can never be reused by mistake. */
memset(cq, 0, sizeof(*cq));
/**
 * create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
/* Completions are reported through the device UAR page. */
struct mlx5_devx_cq_attr attr = {
.uar_page_id = priv->uar->page_id,
/* cq->log_nb_desc must be set by the caller before this call. */
ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
&attr, SOCKET_ID_ANY);
DRV_LOG(ERR, "Can't create CQ object.");
/* On failure leave the wrapper zeroed so callers never see a half-built CQ. */
memset(cq, 0, sizeof(*cq));
97 * Destroy the SQ object.
100 * Pointer to the QP element
102 * The index of the queue.
105 * 0 on success, a negative errno value otherwise and rte_errno is set.
108 regex_ctrl_destroy_hw_qp(struct mlx5_regex_qp *qp, uint16_t q_ind)
110 struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
112 mlx5_devx_qp_destroy(&qp_obj->qp_obj);
113 memset(qp, 0, sizeof(*qp));
/**
 * create the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
uint16_t q_ind, uint16_t log_nb_desc)
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/* DevX QP attributes: completion queue, UAR, PD and timestamp format. */
struct mlx5_devx_qp_attr attr = {
.cqn = qp->cq.cq_obj.cq->id,
.uar_index = priv->uar->page_id,
.pd = priv->cdev->pdn,
.ts_format = mlx5_ts_format_conv
(priv->cdev->config.hca_attr.qp_ts_format),
struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
qp_obj->log_nb_desc = log_nb_desc;
/* UMR mode quadruples the SQ size (log + 2), see MLX5_REGEX_WQE_LOG_NUM. */
attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
attr.mmo = priv->mmo_regex_qp_cap;
ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
&attr, SOCKET_ID_ANY);
DRV_LOG(ERR, "Can't create QP object.");
/* Transition the QP to Ready-To-Send so WQEs can be posted on it. */
ret = mlx5_devx_qp2rts(&qp_obj->qp_obj, 0);
DRV_LOG(ERR, "Can't change QP state to RTS.");
/* RTS transition failed: tear down the QP that was just created. */
regex_ctrl_destroy_hw_qp(qp, q_ind);
/* NOTE(review): this log is on the !HAVE_IBV_FLOW_DV_SUPPORT path. */
DRV_LOG(ERR, "Cannot get pdn - no DV support.");
/**
 * Setup the queue pair: CQ, HW QP objects, MR cache and fastpath.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to setup.
 * @param cfg
 *   The queue requested configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
const struct rte_regexdev_qp_conf *cfg)
struct mlx5_regex_priv *priv = dev->data->dev_private;
struct mlx5_regex_qp *qp;
/* Number of HW QPs successfully configured so far, for error unwind. */
int nb_sq_config = 0;
qp = &priv->qps[qp_ind];
/* Re-configuring an already-initialized QP is rejected. */
DRV_LOG(ERR, "Attempting to setup QP a second time.");
qp->flags = cfg->qp_conf_flags;
/* Ring sizes are powers of two; round the requested depth up to log2. */
log_desc = rte_log2_u32(cfg->nb_desc);
/*
 * UMR mode requires two WQEs(UMR and RegEx WQE) for one descriptor.
 * For CQ, expand the CQE number multiple with 2.
 * For SQ, the UMR and RegEx WQE for one descriptor consumes 4 WQEBBS,
 * expand the WQE number multiple with 4.
 */
qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
qp->nb_desc = 1 << log_desc;
/* With out-of-order scan, spread the descriptors over several HW QPs. */
if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
qp->nb_obj = regex_ctrl_get_nb_obj
(1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc));
/* 64-byte alignment keeps each HW QP descriptor cache-line aligned. */
qp->qps = rte_malloc(NULL,
qp->nb_obj * sizeof(struct mlx5_regex_hw_qp), 64);
DRV_LOG(ERR, "Can't allocate qp array memory.");
/* Each HW QP object gets an equal share of the descriptors. */
log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
/* One CQ is shared by all HW QP objects of this queue pair. */
ret = regex_ctrl_create_cq(priv, &qp->cq);
DRV_LOG(ERR, "Can't create cq.");
for (i = 0; i < qp->nb_obj; i++) {
ret = regex_ctrl_create_hw_qp(priv, qp, i, log_desc);
DRV_LOG(ERR, "Can't create qp object.");
/* Per-queue MR B-tree cache for translating mbuf addresses to mkeys. */
ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
DRV_LOG(ERR, "Error setting up mr btree");
ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
DRV_LOG(ERR, "Error setting up fastpath");
/* Error unwind: free the MR cache, destroy created HW QPs, then the CQ. */
mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
for (i = 0; i < nb_sq_config; i++)
regex_ctrl_destroy_hw_qp(qp, i);
regex_ctrl_destroy_cq(&qp->cq);
276 mlx5_regex_clean_ctrl(struct rte_regexdev *dev)
278 struct mlx5_regex_priv *priv = dev->data->dev_private;
279 struct mlx5_regex_qp *qp;
285 for (qp_ind = 0; qp_ind < priv->nb_queues; qp_ind++) {
286 qp = &priv->qps[qp_ind];
287 /* Check if mlx5_regex_qp_setup() was called for this QP */
290 mlx5_regexdev_teardown_fastpath(priv, qp_ind);
291 mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
292 for (i = 0; i < qp->nb_obj; i++)
293 regex_ctrl_destroy_hw_qp(qp, i);
294 regex_ctrl_destroy_cq(&qp->cq);