/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
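/*
 * A RegEx WQE is 64 bytes wide (log_wq_stride = 6 below), so a standard
 * 4096-byte page holds 4096 / 64 = 64 WQEs. This bounds how many
 * descriptors a single SQ object can carry.
 */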
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096 / 64)
/**
 * Return the number of SQ objects to be created.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}
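/*
 * Example: with 64 WQEs per page, nb_desc = 100 yields
 * 100 / 64 + !!(100 % 64) = 1 + 1 = 2 objects, i.e. a ceiling division
 * so the descriptors that do not fill a whole page still get an SQ.
 */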
/**
 * Destroy the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	if (cq->cqe_umem) {
		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
		cq->cqe_umem = NULL;
	}
	if (cq->cqe) {
		rte_free((void *)(uintptr_t)cq->cqe);
		cq->cqe = NULL;
	}
	if (cq->dbr_offset) {
		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
		cq->dbr_offset = -1;
	}
	if (cq->obj) {
		mlx5_devx_cmd_destroy(cq->obj);
		cq->obj = NULL;
	}
	return 0;
}
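/*
 * Each release above is guarded by its handle, so only resources that
 * were actually allocated are touched; mlx5_regex_qp_setup() relies on
 * this when unwinding after a mid-setup failure.
 */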
/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.q_umem_valid = 1,
		.db_umem_valid = 1,
		.eqn = priv->eqn,
	};
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	void *buf = NULL;
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t cq_size = 1 << cq->log_nb_desc;
	uint32_t i;

	cq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
	if (cq->dbr_offset < 0) {
		DRV_LOG(ERR, "Can't allocate cq door bell record.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
	cq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			       (uintptr_t)cq->dbr_offset);
	buf = rte_calloc(NULL, 1, sizeof(struct mlx5_cqe) * cq_size, 4096);
	if (!buf) {
		DRV_LOG(ERR, "Can't allocate cqe buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq->cqe = buf;
	/* Mark all CQEs as invalid until the HW writes them back. */
	for (i = 0; i < cq_size; i++)
		cq->cqe[i].op_own = 0xff;
	/* Access flags 7: local write | remote write | remote read. */
	cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
						sizeof(struct mlx5_cqe) *
						cq_size, 7);
	cq->ci = 0;
	if (!cq->cqe_umem) {
		DRV_LOG(ERR, "Can't register cqe mem.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.db_umem_offset = cq->dbr_offset;
	attr.db_umem_id = cq->dbr_umem;
	attr.q_umem_id = mlx5_os_get_umem_id(cq->cqe_umem);
	attr.log_cq_size = cq->log_nb_desc;
	attr.uar_page_id = priv->uar->page_id;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->obj = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->obj) {
		DRV_LOG(ERR, "Can't create cq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	if (cq->cqe_umem)
		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
	if (buf)
		rte_free(buf);
	if (cq->dbr_offset >= 0)
		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
	return -rte_errno;
}
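/*
 * Note for callers: cq->log_nb_desc must be set before calling
 * regex_ctrl_create_cq(); mlx5_regex_qp_setup() below derives it from
 * the requested descriptor count with rte_log2_u32(cfg->nb_desc).
 */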
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to get PD object info");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif
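/*
 * The PD number obtained here is the DevX (numeric) handle of the verbs
 * protection domain; regex_ctrl_create_sq() below plugs it into
 * wq_attr->pd when creating the SQ work queue.
 */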
/**
 * Create the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
	void *buf = NULL;
	uint32_t sq_size;
	uint32_t pd_num = 0;
	int ret;

	sq->log_nb_desc = log_nb_desc;
	sq_size = 1 << sq->log_nb_desc;
	sq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
	if (sq->dbr_offset < 0) {
		DRV_LOG(ERR, "Can't allocate sq door bell record.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
	sq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			       (uintptr_t)sq->dbr_offset);
	/* One 64-byte WQE per descriptor, page-aligned for registration. */
	buf = rte_calloc(NULL, 1, 64 * sq_size, 4096);
	if (!buf) {
		DRV_LOG(ERR, "Can't allocate wqe buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->wqe = buf;
	sq->wqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf, 64 * sq_size,
						7);
	sq->ci = 0;
	sq->pi = 0;
	if (!sq->wqe_umem) {
		DRV_LOG(ERR, "Can't register wqe mem.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.state = MLX5_SQC_STATE_RST;
	attr.tis_lst_sz = 0;
	attr.tis_num = 0;
	attr.user_index = q_ind;
	attr.cqn = qp->cq.obj->id;
	wq_attr->uar_page = priv->uar->page_id;
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret) {
		rte_errno = EINVAL;
		goto error;
	}
	wq_attr->pd = pd_num;
	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
	wq_attr->dbr_umem_id = sq->dbr_umem;
	wq_attr->dbr_addr = sq->dbr_offset;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
	wq_attr->wq_umem_offset = 0;
	wq_attr->wq_umem_valid = 1;
	wq_attr->log_wq_stride = 6; /* Each WQE takes 2^6 = 64 bytes. */
	wq_attr->log_wq_sz = sq->log_nb_desc;
	sq->obj = mlx5_devx_cmd_create_sq(priv->ctx, &attr);
	if (!sq->obj) {
		DRV_LOG(ERR, "Can't create sq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	modify_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(sq->obj, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change sq state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	if (sq->wqe_umem)
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
	if (buf)
		rte_free(buf);
	if (sq->dbr_offset >= 0)
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
	return -rte_errno;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}
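/*
 * A DevX SQ is created in the RST state; the explicit transition to RDY
 * via mlx5_devx_cmd_modify_sq() above is what makes the queue accept
 * doorbells, so both steps are required before posting work.
 */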
/**
 * Destroy the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		      uint16_t q_ind)
{
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];

	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->wqe) {
		rte_free((void *)(uintptr_t)sq->wqe);
		sq->wqe = NULL;
	}
	if (sq->dbr_offset) {
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
		sq->dbr_offset = -1;
	}
	if (sq->obj) {
		mlx5_devx_cmd_destroy(sq->obj);
		sq->obj = NULL;
	}
	return 0;
}
/**
 * Setup the qp.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to setup.
 * @param cfg
 *   The queue requested configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	uint16_t log_desc;
	int i;
	int ret;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc);
	qp->nb_desc = 1 << qp->cq.log_nb_desc;
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc);
	else
		qp->nb_obj = 1;
	qp->sqs = rte_malloc(NULL,
			     qp->nb_obj * sizeof(struct mlx5_regex_sq), 64);
	if (!qp->sqs) {
		DRV_LOG(ERR, "Can't allocate sq array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create cq.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_sq(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create sq.");
			goto err_btree;
		}
	}
	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
				 rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up mr btree");
		goto err_btree;
	}
	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up fastpath");
		goto err_fp;
	}
	return 0;
err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	for (i = 0; i < qp->nb_obj; i++)
		regex_ctrl_destroy_sq(priv, qp, i);
	regex_ctrl_destroy_cq(priv, &qp->cq);
err_cq:
	rte_free(qp->sqs);
	return ret;
}
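/*
 * Illustrative usage sketch (not part of the driver): an application
 * reaches mlx5_regex_qp_setup() through the generic regexdev API.
 * dev_id and the descriptor count below are example values; the
 * out-of-order flag matches the OOS handling above.
 *
 *	struct rte_regexdev_qp_conf qp_conf = {
 *		.qp_conf_flags = RTE_REGEX_QUEUE_PAIR_CFG_OOS_F,
 *		.nb_desc = 1024,
 *	};
 *
 *	if (rte_regexdev_queue_pair_setup(dev_id, 0, &qp_conf) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to set up qp 0\n");
 */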