regex/mlx5: remove RXP CSR file
[dpdk.git] / drivers / regex / mlx5 / mlx5_regex_control.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  */
4
5 #include <errno.h>
6
7 #include <rte_log.h>
8 #include <rte_errno.h>
9 #include <rte_memory.h>
10 #include <rte_malloc.h>
11 #include <rte_regexdev.h>
12 #include <rte_regexdev_core.h>
13 #include <rte_regexdev_driver.h>
14 #include <rte_dev.h>
15
16 #include <mlx5_common.h>
17 #include <mlx5_glue.h>
18 #include <mlx5_devx_cmds.h>
19 #include <mlx5_prm.h>
20 #include <mlx5_common_os.h>
21 #include <mlx5_common_devx.h>
22
23 #include "mlx5_regex.h"
24 #include "mlx5_regex_utils.h"
25 #include "mlx5_rxp.h"
26
/* Number of 64B WQE slots that fit in one 4KB page. */
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)

#define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
		((has_umr) ? ((log_desc) + 2) : (log_desc))

/**
 * Returns the number of qp obj to be created.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of obj to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	/* Ceiling division: a partial page still needs a full object. */
	return (nb_desc + MLX5_REGEX_NUM_WQE_PER_PAGE - 1) /
	       MLX5_REGEX_NUM_WQE_PER_PAGE;
}
47
48 /**
49  * destroy CQ.
50  *
51  * @param cp
52  *   Pointer to the CQ to be destroyed.
53  *
54  * @return
55  *   0 on success, a negative errno value otherwise and rte_errno is set.
56  */
57 static int
58 regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
59 {
60         mlx5_devx_cq_destroy(&cq->cq_obj);
61         memset(cq, 0, sizeof(*cq));
62         return 0;
63 }
64
65 /**
66  * create the CQ object.
67  *
68  * @param priv
69  *   Pointer to the priv object.
70  * @param cp
71  *   Pointer to the CQ to be created.
72  *
73  * @return
74  *   0 on success, a negative errno value otherwise and rte_errno is set.
75  */
76 static int
77 regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
78 {
79         struct mlx5_devx_cq_attr attr = {
80                 .uar_page_id = priv->uar->page_id,
81         };
82         int ret;
83
84         cq->ci = 0;
85         ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
86                                   &attr, SOCKET_ID_ANY);
87         if (ret) {
88                 DRV_LOG(ERR, "Can't create CQ object.");
89                 memset(cq, 0, sizeof(*cq));
90                 rte_errno = ENOMEM;
91                 return -rte_errno;
92         }
93         return 0;
94 }
95
96 /**
97  * Destroy the SQ object.
98  *
99  * @param qp
100  *   Pointer to the QP element
101  * @param q_ind
102  *   The index of the queue.
103  *
104  * @return
105  *   0 on success, a negative errno value otherwise and rte_errno is set.
106  */
107 static int
108 regex_ctrl_destroy_hw_qp(struct mlx5_regex_qp *qp, uint16_t q_ind)
109 {
110         struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
111
112         mlx5_devx_qp_destroy(&qp_obj->qp_obj);
113         memset(qp, 0, sizeof(*qp));
114         return 0;
115 }
116
117 /**
118  * create the SQ object.
119  *
120  * @param priv
121  *   Pointer to the priv object.
122  * @param qp
123  *   Pointer to the QP element
124  * @param q_ind
125  *   The index of the queue.
126  * @param log_nb_desc
127  *   Log 2 of the number of descriptors to be used.
128  *
129  * @return
130  *   0 on success, a negative errno value otherwise and rte_errno is set.
131  */
132 static int
133 regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
134                      uint16_t q_ind, uint16_t log_nb_desc)
135 {
136 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
137         struct mlx5_devx_qp_attr attr = {
138                 .cqn = qp->cq.cq_obj.cq->id,
139                 .uar_index = priv->uar->page_id,
140                 .pd = priv->cdev->pdn,
141                 .ts_format = mlx5_ts_format_conv
142                                      (priv->cdev->config.hca_attr.qp_ts_format),
143                 .user_index = q_ind,
144         };
145         struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
146         int ret;
147
148         qp_obj->log_nb_desc = log_nb_desc;
149         qp_obj->qpn = q_ind;
150         qp_obj->ci = 0;
151         qp_obj->pi = 0;
152         attr.rq_size = 0;
153         attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
154                         log_nb_desc));
155         attr.mmo = priv->mmo_regex_qp_cap;
156         ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
157                         MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
158                         &attr, SOCKET_ID_ANY);
159         if (ret) {
160                 DRV_LOG(ERR, "Can't create QP object.");
161                 rte_errno = ENOMEM;
162                 return -rte_errno;
163         }
164         ret = mlx5_devx_qp2rts(&qp_obj->qp_obj, 0);
165         if (ret) {
166                 DRV_LOG(ERR, "Can't change QP state to RTS.");
167                 regex_ctrl_destroy_hw_qp(qp, q_ind);
168                 rte_errno = ENOMEM;
169                 return -rte_errno;
170         }
171         return 0;
172 #else
173         (void)priv;
174         (void)qp;
175         (void)q_ind;
176         (void)log_nb_desc;
177         DRV_LOG(ERR, "Cannot get pdn - no DV support.");
178         return -ENOTSUP;
179 #endif
180 }
181
/**
 * Setup the qp.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to setup.
 * @param cfg
 *   The queue requested configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	/* HW QPs created so far - bounds the error unwind loop below. */
	int nb_sq_config = 0;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	log_desc = rte_log2_u32(cfg->nb_desc);
	/*
	 * UMR mode requires two WQEs(UMR and RegEx WQE) for one descriptor.
	 * For CQ, expand the CQE number multiple with 2.
	 * For SQ, the UMR and RegEx WQE for one descriptor consumes 4 WQEBBS,
	 * expand the WQE number multiple with 4.
	 */
	qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
	qp->nb_desc = 1 << log_desc;
	/* OOS (out-of-order) queues spread descriptors over several HW QPs. */
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj
			(1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc));
	else
		qp->nb_obj = 1;
	qp->qps = rte_malloc(NULL,
			     qp->nb_obj * sizeof(struct mlx5_regex_hw_qp), 64);
	if (!qp->qps) {
		DRV_LOG(ERR, "Can't allocate qp array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Per-HW-QP descriptor count after splitting across nb_obj QPs. */
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create cq.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_hw_qp(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create qp object.");
			goto err_btree;
		}
		nb_sq_config++;
	}

	/* MR b-tree cache used by the datapath to resolve memory keys. */
	ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
				rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up mr btree");
		goto err_btree;
	}

	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up fastpath");
		goto err_fp;
	}
	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	for (i = 0; i < nb_sq_config; i++)
		regex_ctrl_destroy_hw_qp(qp, i);
	regex_ctrl_destroy_cq(&qp->cq);
err_cq:
	rte_free(qp->qps);
	return ret;
}
268
269 void
270 mlx5_regex_clean_ctrl(struct rte_regexdev *dev)
271 {
272         struct mlx5_regex_priv *priv = dev->data->dev_private;
273         struct mlx5_regex_qp *qp;
274         int qp_ind;
275         int i;
276
277         if (!priv->qps)
278                 return;
279         for (qp_ind = 0; qp_ind < priv->nb_queues; qp_ind++) {
280                 qp = &priv->qps[qp_ind];
281                 /* Check if mlx5_regex_qp_setup() was called for this QP */
282                 if (!qp->jobs)
283                         continue;
284                 mlx5_regexdev_teardown_fastpath(priv, qp_ind);
285                 mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
286                 for (i = 0; i < qp->nb_obj; i++)
287                         regex_ctrl_destroy_hw_qp(qp, i);
288                 regex_ctrl_destroy_cq(&qp->cq);
289         }
290 }