1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
5 #include <rte_malloc.h>
9 #include <rte_regexdev.h>
10 #include <rte_regexdev_core.h>
11 #include <rte_regexdev_driver.h>
12 #include <rte_bus_pci.h>
14 #include <mlx5_common.h>
15 #include <mlx5_common_mr.h>
16 #include <mlx5_glue.h>
17 #include <mlx5_devx_cmds.h>
20 #include "mlx5_regex.h"
21 #include "mlx5_regex_utils.h"
22 #include "mlx5_rxp_csrs.h"
/* Driver name under which this PMD registers with the class/regexdev layer. */
24 #define MLX5_REGEX_DRIVER_NAME regex_mlx5
/*
 * Log type id for DRV_LOG.
 * NOTE(review): RTE_LOG_REGISTER_DEFAULT(mlx5_regex_logtype, ...) at the end
 * of this file also defines this symbol — with -fno-common this looks like a
 * duplicate definition; confirm against upstream (newer DPDK dropped this
 * tentative definition).
 */
26 int mlx5_regex_logtype;
/*
 * Global list of probed mlx5 regex devices; walked by the memory event
 * callback below. All accesses are serialized by mem_event_list_lock.
 */
28 TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list =
29 TAILQ_HEAD_INITIALIZER(mlx5_mem_event_list);
30 static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * regexdev operations table handed to the rte_regexdev framework; each
 * callback is implemented in this driver (start/stop/close are stubs below).
 * NOTE(review): the closing "};" of this initializer is missing from this
 * chunk, and every line carries a stray leading number — this looks like a
 * corrupted/partial paste of the upstream file; restore from upstream before
 * building.
 */
32 const struct rte_regexdev_ops mlx5_regexdev_ops = {
33 .dev_info_get = mlx5_regex_info_get,
34 .dev_configure = mlx5_regex_configure,
35 .dev_db_import = mlx5_regex_rules_db_import,
36 .dev_qp_setup = mlx5_regex_qp_setup,
37 .dev_start = mlx5_regex_start,
38 .dev_stop = mlx5_regex_stop,
39 .dev_close = mlx5_regex_close,
/*
 * Start/stop/close callbacks. The parameter is tagged __rte_unused on all
 * three, so the bodies do not touch the device.
 * NOTE(review): return types, braces and bodies are missing from this chunk —
 * presumably trivial "return 0;" stubs; confirm against upstream.
 */
43 mlx5_regex_start(struct rte_regexdev *dev __rte_unused)
49 mlx5_regex_stop(struct rte_regexdev *dev __rte_unused)
55 mlx5_regex_close(struct rte_regexdev *dev __rte_unused)
/*
 * Sanity-check the RXP engines: for each engine in [0, num_engines) read the
 * MLX5_RXP_CSR_IDENTIFIER register, mask the low 16 bits and compare against
 * MLX5_RXP_IDENTIFIER; log and fail on mismatch or read error.
 * NOTE(review): this chunk is missing lines — the return type, declarations
 * of i/err, the output argument of the register read (presumably
 * &fpga_ident), and the function tail; restore from upstream.
 */
61 mlx5_regex_engines_status(struct ibv_context *ctx, int num_engines)
63 uint32_t fpga_ident = 0;
67 for (i = 0; i < num_engines; i++) {
68 err = mlx5_devx_regex_register_read(ctx, i,
69 MLX5_RXP_CSR_IDENTIFIER,
71 fpga_ident = (fpga_ident & (0x0000FFFF));
72 if (err || fpga_ident != MLX5_RXP_IDENTIFIER) {
73 DRV_LOG(ERR, "Failed setup RXP %d err %d database "
74 "memory 0x%x", i, err, fpga_ident);
/*
 * Compose the regexdev name "mlx5_regex_<rte device name>" into @name.
 * Caller provides a buffer of RTE_REGEXDEV_NAME_MAX_LEN bytes (see callers);
 * the unbounded sprintf relies on device names being short enough — the
 * upstream pattern for this helper.
 */
84 mlx5_regex_get_name(char *name, struct rte_device *dev)
86 sprintf(name, "mlx5_regex_%s", dev->name);
/*
 * EAL memory event callback (registered as "MLX5_MEM_EVENT_CB" in probe).
 * On RTE_MEM_EVENT_FREE it walks every probed device on mlx5_mem_event_list
 * (under mem_event_list_lock) and evicts MRs covering the freed range from
 * that device's shared MR cache; ALLOC events are ignored. Must run in the
 * primary process (asserted).
 * NOTE(review): chunk is missing the surrounding comment delimiters, the
 * len argument of mlx5_free_mr_by_addr, the "break;"s and closing braces.
 */
90 * Callback for memory event.
100 mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
101 size_t len, void *arg __rte_unused)
103 struct mlx5_regex_priv *priv;
105 /* Must be called from the primary process. */
106 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
107 switch (event_type) {
108 case RTE_MEM_EVENT_FREE:
109 pthread_mutex_lock(&mem_event_list_lock);
110 /* Iterate all the existing mlx5 devices. */
111 TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
112 mlx5_free_mr_by_addr(&priv->mr_scache,
113 priv->ctx->device->name,
115 pthread_mutex_unlock(&mem_event_list_lock);
117 case RTE_MEM_EVENT_ALLOC:
/*
 * Class-driver probe: open the IB device behind @rte_dev, verify RegEx HCA
 * capabilities and RXP engine identity, allocate and populate the per-device
 * private structure, register an rte_regexdev, set up UAR/PD/MR cache, hook
 * the global memory event callback, and mark the device READY.
 * Returns 0 on success; on failure unwinds (PD, UAR, regexdev, context) —
 * the error labels/gotos are among the lines missing from this chunk.
 * NOTE(review): many lines are absent (NULL checks after each allocation,
 * "goto error"/label lines, return statements) and every line carries a
 * stray leading number — restore this function from upstream.
 */
124 mlx5_regex_dev_probe(struct rte_device *rte_dev)
126 struct ibv_device *ibv;
127 struct mlx5_regex_priv *priv = NULL;
128 struct ibv_context *ctx = NULL;
129 struct mlx5_hca_attr attr;
130 char name[RTE_REGEXDEV_NAME_MAX_LEN];
134 ibv = mlx5_os_get_ibv_dev(rte_dev);
137 DRV_LOG(INFO, "Probe device \"%s\".", ibv->name);
138 ctx = mlx5_glue->dv_open_device(ibv);
140 DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
/* Capability gating: need the regex HCA cap (or the MMO regex SQ/QP caps)
 * plus at least one RXP engine, otherwise refuse to probe. */
144 ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
146 DRV_LOG(ERR, "Unable to read HCA capabilities.");
149 } else if (((!attr.regex) && (!attr.mmo_regex_sq_en) &&
150 (!attr.mmo_regex_qp_en)) || attr.regexp_num_of_engines == 0) {
151 DRV_LOG(ERR, "Not enough capabilities to support RegEx, maybe "
152 "old FW/OFED version?");
156 if (mlx5_regex_engines_status(ctx, 2)) {
157 DRV_LOG(ERR, "RegEx engine error.")
161 priv = rte_zmalloc("mlx5 regex device private", sizeof(*priv),
162 RTE_CACHE_LINE_SIZE);
164 DRV_LOG(ERR, "Failed to allocate private memory.");
168 priv->mmo_regex_qp_cap = attr.mmo_regex_qp_en;
169 priv->mmo_regex_sq_cap = attr.mmo_regex_sq_en;
170 priv->qp_ts_format = attr.qp_ts_format;
/* Engine count is hard-coded to 2 here rather than taken from the HCA
 * attribute — deliberate upstream choice, see the trailing comment. */
172 priv->nb_engines = 2; /* attr.regexp_num_of_engines */
173 ret = mlx5_devx_regex_register_read(priv->ctx, 0,
174 MLX5_RXP_CSR_IDENTIFIER, &val);
176 DRV_LOG(ERR, "CSR read failed!");
179 if (val == MLX5_RXP_BF2_IDENTIFIER)
181 /* Default RXP programming mode to Shared. */
182 priv->prog_mode = MLX5_RXP_SHARED_PROG_MODE;
183 mlx5_regex_get_name(name, rte_dev);
184 priv->regexdev = rte_regexdev_register(name);
185 if (priv->regexdev == NULL) {
186 DRV_LOG(ERR, "Failed to register RegEx device.");
187 rte_errno = rte_errno ? rte_errno : EINVAL;
191 * This PMD always claims the write memory barrier on UAR
192 * registers writings, it is safe to allocate UAR with any
193 * memory mapping type.
195 priv->uar = mlx5_devx_alloc_uar(ctx, -1);
197 DRV_LOG(ERR, "can't allocate uar.");
201 priv->pd = mlx5_glue->alloc_pd(ctx);
203 DRV_LOG(ERR, "can't allocate pd.");
/* Wire up fast-path entry points; prefer the GGA enqueue when UMR
 * indirect mkeys are usable (build- and FW-gated). */
207 priv->regexdev->dev_ops = &mlx5_regexdev_ops;
208 priv->regexdev->enqueue = mlx5_regexdev_enqueue;
209 #ifdef HAVE_MLX5_UMR_IMKEY
210 if (!attr.umr_indirect_mkey_disabled &&
211 !attr.umr_modify_entity_size_disabled)
214 priv->regexdev->enqueue = mlx5_regexdev_enqueue_gga;
216 priv->regexdev->dequeue = mlx5_regexdev_dequeue;
217 priv->regexdev->device = rte_dev;
218 priv->regexdev->data->dev_private = priv;
219 priv->regexdev->state = RTE_REGEXDEV_READY;
220 priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
221 priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
222 ret = mlx5_mr_btree_init(&priv->mr_scache.cache,
223 MLX5_MR_BTREE_CACHE_N * 2,
226 DRV_LOG(ERR, "MR init tree failed.");
230 /* Register callback function for global shared MR cache management. */
231 if (TAILQ_EMPTY(&mlx5_mem_event_list))
232 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
233 mlx5_regex_mr_mem_event_cb,
235 /* Add device to memory callback list. */
236 pthread_mutex_lock(&mem_event_list_lock);
237 TAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb);
238 pthread_mutex_unlock(&mem_event_list_lock);
239 DRV_LOG(INFO, "RegEx GGA is %s.",
240 priv->has_umr ? "supported" : "unsupported");
/* Error unwind (labels missing from this chunk): release in reverse
 * order of acquisition. */
245 mlx5_glue->dealloc_pd(priv->pd);
247 mlx5_glue->devx_free_uar(priv->uar);
249 rte_regexdev_unregister(priv->regexdev);
252 mlx5_glue->close_device(ctx);
/*
 * Class-driver remove: look up the regexdev registered under this rte_device,
 * detach it from the global memory event list (unregistering the EAL callback
 * when the list becomes empty), then release MR cache, PD, UAR, the regexdev
 * registration and the device context — the reverse of probe.
 * NOTE(review): chunk is missing the NULL checks guarding each release, the
 * final free of priv and the return — restore from upstream.
 */
259 mlx5_regex_dev_remove(struct rte_device *rte_dev)
261 char name[RTE_REGEXDEV_NAME_MAX_LEN];
262 struct rte_regexdev *dev;
263 struct mlx5_regex_priv *priv = NULL;
265 mlx5_regex_get_name(name, rte_dev);
266 dev = rte_regexdev_get_device_by_name(name);
269 priv = dev->data->dev_private;
271 /* Remove from memory callback device list. */
272 pthread_mutex_lock(&mem_event_list_lock);
273 TAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb);
274 pthread_mutex_unlock(&mem_event_list_lock);
275 if (TAILQ_EMPTY(&mlx5_mem_event_list))
276 rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
278 if (priv->mr_scache.cache.table)
279 mlx5_mr_release_cache(&priv->mr_scache);
281 mlx5_glue->dealloc_pd(priv->pd);
283 mlx5_glue->devx_free_uar(priv->uar);
285 rte_regexdev_unregister(priv->regexdev);
287 mlx5_glue->close_device(priv->ctx);
/*
 * PCI IDs this PMD binds to: ConnectX-6 Dx BlueField-2 and ConnectX-7
 * BlueField-3 Mellanox devices.
 * NOTE(review): the per-entry braces and the terminating zero entry /
 * closing "};" are missing from this chunk.
 */
293 static const struct rte_pci_id mlx5_regex_pci_id_map[] = {
295 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
296 PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
299 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
300 PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
/*
 * mlx5 common class-driver descriptor: ties the REGEX class to this PMD's
 * probe/remove entry points and PCI id table.
 * NOTE(review): closing "};" missing from this chunk.
 */
307 static struct mlx5_class_driver mlx5_regex_driver = {
308 .drv_class = MLX5_CLASS_REGEX,
309 .name = RTE_STR(MLX5_REGEX_DRIVER_NAME),
310 .id_table = mlx5_regex_pci_id_map,
311 .probe = mlx5_regex_dev_probe,
312 .remove = mlx5_regex_dev_remove,
/*
 * Constructor: register this driver with the mlx5 common class layer at
 * startup. NOTE(review): the braces of the RTE_INIT body are missing here.
 */
315 RTE_INIT(rte_mlx5_regex_init)
319 mlx5_class_driver_register(&mlx5_regex_driver);
/* Log type registration plus standard PMD export macros: name, PCI table
 * and kernel module dependencies. */
322 RTE_LOG_REGISTER_DEFAULT(mlx5_regex_logtype, NOTICE)
323 RTE_PMD_EXPORT_NAME(MLX5_REGEX_DRIVER_NAME, __COUNTER__);
324 RTE_PMD_REGISTER_PCI_TABLE(MLX5_REGEX_DRIVER_NAME, mlx5_regex_pci_id_map);
325 RTE_PMD_REGISTER_KMOD_DEP(MLX5_REGEX_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");