common/mlx5: add global MR cache create function
[dpdk.git] drivers/regex/mlx5/mlx5_regex.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <rte_bus_pci.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"

#define MLX5_REGEX_DRIVER_NAME regex_mlx5

int mlx5_regex_logtype;

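/* List of devices attached to the memory event callback. */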
TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list =
				TAILQ_HEAD_INITIALIZER(mlx5_mem_event_list);
static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER;

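/* Operations exposed through the regexdev framework. */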
const struct rte_regexdev_ops mlx5_regexdev_ops = {
	.dev_info_get = mlx5_regex_info_get,
	.dev_configure = mlx5_regex_configure,
	.dev_db_import = mlx5_regex_rules_db_import,
	.dev_qp_setup = mlx5_regex_qp_setup,
	.dev_start = mlx5_regex_start,
	.dev_stop = mlx5_regex_stop,
	.dev_close = mlx5_regex_close,
};

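/* The start, stop and close device operations are currently no-ops. */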
int
mlx5_regex_start(struct rte_regexdev *dev __rte_unused)
{
	return 0;
}

int
mlx5_regex_stop(struct rte_regexdev *dev __rte_unused)
{
	return 0;
}

int
mlx5_regex_close(struct rte_regexdev *dev __rte_unused)
{
	return 0;
}

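/**
 * Check that every RXP engine reports the expected identifier.
 *
 * @param ctx
 *   Pointer to the device context.
 * @param num_engines
 *   Number of RXP engines to query.
 *
 * @return
 *   0 on success, a non-zero error value otherwise.
 */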
static int
mlx5_regex_engines_status(struct ibv_context *ctx, int num_engines)
{
	uint32_t fpga_ident = 0;
	int err;
	int i;

	for (i = 0; i < num_engines; i++) {
		err = mlx5_devx_regex_register_read(ctx, i,
						    MLX5_RXP_CSR_IDENTIFIER,
						    &fpga_ident);
		fpga_ident = (fpga_ident & (0x0000FFFF));
		if (err || fpga_ident != MLX5_RXP_IDENTIFIER) {
			DRV_LOG(ERR, "Failed to setup RXP %d, err %d, "
				"identifier 0x%x", i, err, fpga_ident);
			if (!err)
				err = EINVAL;
			return err;
		}
	}
	return 0;
}

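/**
 * Build the RegEx device name from the underlying device name.
 *
 * @param[out] name
 *   Output buffer, at least RTE_REGEXDEV_NAME_MAX_LEN bytes long.
 * @param dev
 *   Pointer to the generic device.
 */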
static void
mlx5_regex_get_name(char *name, struct rte_device *dev)
{
	sprintf(name, "mlx5_regex_%s", dev->name);
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
			   size_t len, void *arg __rte_unused)
{
	struct mlx5_regex_priv *priv;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&mem_event_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
			mlx5_free_mr_by_addr(&priv->mr_scache,
					     mlx5_os_get_ctx_device_name
							      (priv->cdev->ctx),
					     addr, len);
		pthread_mutex_unlock(&mem_event_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

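/**
 * DPDK callback to probe a RegEx capable mlx5 device.
 *
 * Verifies the RegEx HW capabilities, allocates the private data,
 * registers the regexdev, allocates the UAR, creates the global MR cache
 * and adds the device to the memory event callback list.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */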
static int
mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
{
	struct mlx5_regex_priv *priv = NULL;
	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	int ret;
	uint32_t val;

	if ((!attr->regex && !attr->mmo_regex_sq_en && !attr->mmo_regex_qp_en)
	    || attr->regexp_num_of_engines == 0) {
		DRV_LOG(ERR, "Not enough capabilities to support RegEx, maybe "
			"old FW/OFED version?");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (mlx5_regex_engines_status(cdev->ctx, 2)) {
		DRV_LOG(ERR, "RegEx engine error.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv = rte_zmalloc("mlx5 regex device private", sizeof(*priv),
			   RTE_CACHE_LINE_SIZE);
	if (!priv) {
		DRV_LOG(ERR, "Failed to allocate private memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mmo_regex_qp_cap = attr->mmo_regex_qp_en;
	priv->mmo_regex_sq_cap = attr->mmo_regex_sq_en;
	priv->cdev = cdev;
	priv->nb_engines = 2; /* attr.regexp_num_of_engines */
	ret = mlx5_devx_regex_register_read(priv->cdev->ctx, 0,
					    MLX5_RXP_CSR_IDENTIFIER, &val);
	if (ret) {
		DRV_LOG(ERR, "CSR read failed!");
		goto dev_error;
	}
	if (val == MLX5_RXP_BF2_IDENTIFIER)
		priv->is_bf2 = 1;
	/* Default RXP programming mode to Shared. */
	priv->prog_mode = MLX5_RXP_SHARED_PROG_MODE;
	mlx5_regex_get_name(name, cdev->dev);
	priv->regexdev = rte_regexdev_register(name);
	if (priv->regexdev == NULL) {
		DRV_LOG(ERR, "Failed to register RegEx device.");
		rte_errno = rte_errno ? rte_errno : EINVAL;
		goto dev_error;
	}
	/*
	 * This PMD always issues the write memory barrier on UAR
	 * register writes, so it is safe to allocate UAR with any
	 * memory mapping type.
	 */
	priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
	if (!priv->uar) {
		DRV_LOG(ERR, "Failed to allocate UAR.");
		rte_errno = ENOMEM;
		goto error;
	}
	priv->regexdev->dev_ops = &mlx5_regexdev_ops;
	priv->regexdev->enqueue = mlx5_regexdev_enqueue;
#ifdef HAVE_MLX5_UMR_IMKEY
	if (!attr->umr_indirect_mkey_disabled &&
	    !attr->umr_modify_entity_size_disabled)
		priv->has_umr = 1;
	if (priv->has_umr)
		priv->regexdev->enqueue = mlx5_regexdev_enqueue_gga;
#endif
	priv->regexdev->dequeue = mlx5_regexdev_dequeue;
	priv->regexdev->device = cdev->dev;
	priv->regexdev->data->dev_private = priv;
	priv->regexdev->state = RTE_REGEXDEV_READY;
	ret = mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "MR init tree failed.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&mlx5_mem_event_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_regex_mr_mem_event_cb,
						NULL);
	/* Add device to memory callback list. */
	pthread_mutex_lock(&mem_event_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb);
	pthread_mutex_unlock(&mem_event_list_lock);
	DRV_LOG(INFO, "RegEx GGA is %s.",
		priv->has_umr ? "supported" : "unsupported");
	return 0;

error:
	if (priv->uar)
		mlx5_glue->devx_free_uar(priv->uar);
	if (priv->regexdev)
		rte_regexdev_unregister(priv->regexdev);
dev_error:
	if (priv)
		rte_free(priv);
	return -rte_errno;
}

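/**
 * DPDK callback to remove a RegEx device.
 *
 * Releases the resources allocated by mlx5_regex_dev_probe().
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 *
 * @return
 *   0 on success, also when no matching device is found.
 */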
static int
mlx5_regex_dev_remove(struct mlx5_common_device *cdev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct rte_regexdev *dev;
	struct mlx5_regex_priv *priv = NULL;

	mlx5_regex_get_name(name, cdev->dev);
	dev = rte_regexdev_get_device_by_name(name);
	if (!dev)
		return 0;
	priv = dev->data->dev_private;
	if (priv) {
		/* Remove from memory callback device list. */
		pthread_mutex_lock(&mem_event_list_lock);
		TAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb);
		pthread_mutex_unlock(&mem_event_list_lock);
		if (TAILQ_EMPTY(&mlx5_mem_event_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		if (priv->mr_scache.cache.table)
			mlx5_mr_release_cache(&priv->mr_scache);
		if (priv->uar)
			mlx5_glue->devx_free_uar(priv->uar);
		if (priv->regexdev)
			rte_regexdev_unregister(priv->regexdev);
		rte_free(priv);
	}
	return 0;
}

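/* PCI device IDs matched by this driver. */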
static const struct rte_pci_id mlx5_regex_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
	},
	{
		.vendor_id = 0
	}
};

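/* mlx5 class driver descriptor for the RegEx class. */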
static struct mlx5_class_driver mlx5_regex_driver = {
	.drv_class = MLX5_CLASS_REGEX,
	.name = RTE_STR(MLX5_REGEX_DRIVER_NAME),
	.id_table = mlx5_regex_pci_id_map,
	.probe = mlx5_regex_dev_probe,
	.remove = mlx5_regex_dev_remove,
};

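/*
 * Constructor: initialize the mlx5 common layer and register the RegEx
 * class driver if the glue library was successfully loaded.
 */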
RTE_INIT(rte_mlx5_regex_init)
{
	mlx5_common_init();
	if (mlx5_glue)
		mlx5_class_driver_register(&mlx5_regex_driver);
}

RTE_LOG_REGISTER_DEFAULT(mlx5_regex_logtype, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_REGEX_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_REGEX_DRIVER_NAME, mlx5_regex_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_REGEX_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");