common/mlx5: share HCA capabilities handle
[dpdk.git] / drivers / regex / mlx5 / mlx5_regex_control.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  */
4
5 #include <errno.h>
6
7 #include <rte_log.h>
8 #include <rte_errno.h>
9 #include <rte_memory.h>
10 #include <rte_malloc.h>
11 #include <rte_regexdev.h>
12 #include <rte_regexdev_core.h>
13 #include <rte_regexdev_driver.h>
14 #include <rte_dev.h>
15
16 #include <mlx5_common.h>
17 #include <mlx5_glue.h>
18 #include <mlx5_devx_cmds.h>
19 #include <mlx5_prm.h>
20 #include <mlx5_common_os.h>
21 #include <mlx5_common_devx.h>
22
23 #include "mlx5_regex.h"
24 #include "mlx5_regex_utils.h"
25 #include "mlx5_rxp_csrs.h"
26 #include "mlx5_rxp.h"
27
28 #define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)
29
30 #define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
31                 ((has_umr) ? ((log_desc) + 2) : (log_desc))
32
33 /**
34  * Returns the number of qp obj to be created.
35  *
36  * @param nb_desc
37  *   The number of descriptors for the queue.
38  *
39  * @return
40  *   The number of obj to be created.
41  */
42 static uint16_t
43 regex_ctrl_get_nb_obj(uint16_t nb_desc)
44 {
45         return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
46                 !!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
47 }
48
/**
 * Destroy the CQ object.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
{
	/* Release the DevX CQ object, then zero the whole wrapper so any
	 * stale handle or counter (ci) cannot be reused by mistake.
	 */
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
	/* Always succeeds; the destroy helper's result is not propagated. */
	return 0;
}
65
/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
77 static int
78 regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
79 {
80         struct mlx5_devx_cq_attr attr = {
81                 .uar_page_id = priv->uar->page_id,
82         };
83         int ret;
84
85         cq->ci = 0;
86         ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
87                                   &attr, SOCKET_ID_ANY);
88         if (ret) {
89                 DRV_LOG(ERR, "Can't create CQ object.");
90                 memset(cq, 0, sizeof(*cq));
91                 rte_errno = ENOMEM;
92                 return -rte_errno;
93         }
94         return 0;
95 }
96
/**
 * Destroy one hardware QP object of the queue.
 *
 * @param qp
 *   Pointer to the QP element
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
108 static int
109 regex_ctrl_destroy_hw_qp(struct mlx5_regex_qp *qp, uint16_t q_ind)
110 {
111         struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
112
113         mlx5_devx_qp_destroy(&qp_obj->qp_obj);
114         memset(qp, 0, sizeof(*qp));
115         return 0;
116 }
117
/**
 * Create one hardware QP object of the queue.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* All HW QPs of the queue share the single CQ created earlier. */
	struct mlx5_devx_qp_attr attr = {
		.cqn = qp->cq.cq_obj.cq->id,
		.uar_index = priv->uar->page_id,
		.pd = priv->cdev->pdn,
		.ts_format = mlx5_ts_format_conv
				     (priv->cdev->config.hca_attr.qp_ts_format),
		/* Stored in the QP so completions can be mapped back to it. */
		.user_index = q_ind,
	};
	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
	int ret;

	qp_obj->log_nb_desc = log_nb_desc;
	qp_obj->qpn = q_ind;
	/* Fresh queue: consumer and producer indexes start at zero. */
	qp_obj->ci = 0;
	qp_obj->pi = 0;
	/* Send-only QP: no receive queue is needed for RegEx jobs. */
	attr.rq_size = 0;
	/*
	 * With UMR each descriptor consumes 4 WQEBBs (UMR + RegEx WQE),
	 * so the SQ is sized 4x the descriptor count (log + 2).
	 */
	attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
			log_nb_desc));
	attr.mmo = priv->mmo_regex_qp_cap;
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
			MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
			&attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create QP object.");
		/* NOTE(review): real failure cause from ret is discarded;
		 * ENOMEM is reported for any DevX error.
		 */
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Move the QP to Ready-To-Send so WQEs can be posted immediately. */
	ret = mlx5_devx_qp2rts(&qp_obj->qp_obj, 0);
	if (ret) {
		DRV_LOG(ERR, "Can't change QP state to RTS.");
		regex_ctrl_destroy_hw_qp(qp, q_ind);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
#else
	/* Without DV support the PD number cannot be obtained. */
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}
182
183 /**
184  * Setup the qp.
185  *
186  * @param dev
187  *   Pointer to RegEx dev structure.
188  * @param qp_ind
189  *   The queue index to setup.
190  * @param cfg
191  *   The queue requested configuration.
192  *
193  * @return
194  *   0 on success, a negative errno value otherwise and rte_errno is set.
195  */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	/* Count of HW QPs created so far, for partial unwind on error. */
	int nb_sq_config = 0;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	/* Round the requested descriptor count to a power of two (log2). */
	log_desc = rte_log2_u32(cfg->nb_desc);
	/*
	 * UMR mode requires two WQEs(UMR and RegEx WQE) for one descriptor.
	 * For CQ, expand the CQE number multiple with 2.
	 * For SQ, the UMR and RegEx WQE for one descriptor consumes 4 WQEBBS,
	 * expand the WQE number multiple with 4.
	 */
	qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
	qp->nb_desc = 1 << log_desc;
	/*
	 * Out-of-order scheduling spreads the descriptors over several HW
	 * QP objects; in-order mode uses a single object.
	 */
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj
			(1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc));
	else
		qp->nb_obj = 1;
	/* 64-byte (cache line) aligned array of HW QP wrappers. */
	qp->qps = rte_malloc(NULL,
			     qp->nb_obj * sizeof(struct mlx5_regex_hw_qp), 64);
	if (!qp->qps) {
		DRV_LOG(ERR, "Can't allocate qp array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Per-object descriptor count: total descriptors split evenly. */
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create cq.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_hw_qp(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create qp object.");
			goto err_btree;
		}
		nb_sq_config++;
	}

	/* Save pointer of global generation number to check memory event. */
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	/* Per-queue MR lookup cache (B-tree) for data buffer registration. */
	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
				 rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up mr btree");
		goto err_btree;
	}

	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up fastpath");
		goto err_fp;
	}
	return 0;

	/* Unwind in reverse order of acquisition. */
err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	/* Destroy only the HW QPs that were successfully created. */
	for (i = 0; i < nb_sq_config; i++)
		regex_ctrl_destroy_hw_qp(qp, i);
	regex_ctrl_destroy_cq(&qp->cq);
err_cq:
	/* NOTE(review): qp->qps is not reset to NULL after freeing —
	 * relies on callers not touching a failed queue; confirm.
	 */
	rte_free(qp->qps);
	return ret;
}