1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 Mellanox Technologies, Ltd
5 #include <rte_malloc.h>
10 #include <rte_compressdev.h>
11 #include <rte_compressdev_pmd.h>
13 #include <mlx5_glue.h>
14 #include <mlx5_common.h>
15 #include <mlx5_common_pci.h>
16 #include <mlx5_devx_cmds.h>
17 #include <mlx5_common_os.h>
20 #include "mlx5_compress_utils.h"
22 #define MLX5_COMPRESS_DRIVER_NAME mlx5_compress
23 #define MLX5_COMPRESS_LOG_NAME pmd.compress.mlx5
24 #define MLX5_COMPRESS_MAX_QPS 1024
/*
 * Per-device private context of the mlx5 compress PMD.  One instance is
 * allocated per probed PCI device (inside the compressdev's private data
 * area) and linked into the global mlx5_compress_priv_list.
 * NOTE(review): this excerpt is truncated — the struct's closing brace and
 * the pd/uar members referenced by the PD/UAR helpers below are not visible
 * here; confirm against the full file.
 */
26 struct mlx5_compress_priv {
27 TAILQ_ENTRY(mlx5_compress_priv) next; /* Link in mlx5_compress_priv_list. */
28 struct ibv_context *ctx; /* Device context. */
29 struct rte_pci_device *pci_dev; /* Backing PCI device (matched on remove). */
30 struct rte_compressdev *cdev; /* The compressdev created for this device. */
32 uint32_t pdn; /* Protection Domain number. */
33 uint8_t min_block_size;
34 /* Minimum huffman block size supported by the device. */
36 struct rte_compressdev_config dev_config; /* Last config from dev_configure(). */
/* Global list of all probed compress devices, guarded by priv_list_lock. */
39 TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
40 TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
/* Serializes insert (probe) and remove against each other. */
41 static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
/* Log type id, registered by RTE_LOG_REGISTER at the bottom of the file. */
43 int mlx5_compress_logtype;
/* Capability table returned by dev_infos_get; zero-initialized here, i.e.
 * no algorithm capabilities are advertised yet. */
45 const struct rte_compressdev_capabilities mlx5_caps[RTE_COMP_ALGO_LIST_END];
/*
 * compressdev dev_infos_get callback: report the fixed QP limit, the
 * HW-accelerated feature flag and the (currently empty) capability table.
 * NOTE(review): excerpt is truncated — the surrounding braces and any
 * NULL-check of 'info' are not visible here.
 */
49 mlx5_compress_dev_info_get(struct rte_compressdev *dev,
50 struct rte_compressdev_info *info)
54 info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS;
55 info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
56 info->capabilities = mlx5_caps;
/*
 * compressdev dev_configure callback: validate the arguments and store a
 * copy of the requested configuration in the private context.
 * NOTE(review): the error-return statement after the NULL check is not
 * visible in this excerpt.
 */
61 mlx5_compress_dev_configure(struct rte_compressdev *dev,
62 struct rte_compressdev_config *config)
64 struct mlx5_compress_priv *priv;
66 if (dev == NULL || config == NULL)
68 priv = dev->data->dev_private;
69 priv->dev_config = *config; /* Struct copy; kept for later QP setup. */
/* compressdev dev_close callback.
 * NOTE(review): body not visible in this excerpt — presumably a no-op
 * returning 0; confirm against the full file. */
74 mlx5_compress_dev_close(struct rte_compressdev *dev)
/*
 * compressdev operations table.  Data-path related callbacks (queue pair
 * setup/release, xform and stream management) are still NULL — this stage
 * of the driver only wires up the control-plane entry points.
 */
80 static struct rte_compressdev_ops mlx5_compress_ops = {
81 .dev_configure = mlx5_compress_dev_configure,
84 .dev_close = mlx5_compress_dev_close,
85 .dev_infos_get = mlx5_compress_dev_info_get,
88 .queue_pair_setup = NULL,
89 .queue_pair_release = NULL,
90 .private_xform_create = NULL,
91 .private_xform_free = NULL,
92 .stream_create = NULL,
/*
 * Scan the verbs device list and return the ibv_device whose PCI address
 * matches @addr, or NULL when no device matches (or the list cannot be
 * obtained).  The device list is freed before returning; the returned
 * pointer refers to rdma-core's device table.
 * NOTE(review): the loop header iterating 'n' and the error-path lines are
 * not visible in this excerpt.
 */
96 static struct ibv_device *
97 mlx5_compress_get_ib_device_match(struct rte_pci_addr *addr)
100 struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
101 struct ibv_device *ibv_match = NULL;
103 if (ibv_list == NULL) {
108 struct rte_pci_addr paddr;
110 DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
/* Skip devices whose sysfs path yields no PCI address. */
111 if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &paddr) != 0)
112 if (rte_pci_addr_cmp(addr, &paddr) != 0)
115 ibv_match = ibv_list[n];
118 if (ibv_match == NULL)
120 mlx5_glue->free_device_list(ibv_list);
/*
 * Release the per-device global HW resources acquired by
 * mlx5_compress_hw_global_prepare(): the Protection Domain and the UAR.
 * Safe on partially-initialized contexts thanks to the NULL checks.
 */
125 mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
127 if (priv->pd != NULL) {
128 claim_zero(mlx5_glue->dealloc_pd(priv->pd));
131 if (priv->uar != NULL) {
132 mlx5_glue->devx_free_uar(priv->uar);
/*
 * Allocate a verbs Protection Domain on priv->ctx and extract its PD
 * number (pdn) via the mlx5dv object interface.  Requires Direct Verbs
 * support; without HAVE_IBV_FLOW_DV_SUPPORT the function only logs an
 * error (the visible #else branch).
 * Returns 0 on success, a negative errno value on failure.
 * NOTE(review): 'ret' declaration, success return and the error return
 * after dealloc_pd are not visible in this excerpt.
 */
138 mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
140 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
141 struct mlx5dv_obj obj;
142 struct mlx5dv_pd pd_info;
145 priv->pd = mlx5_glue->alloc_pd(priv->ctx);
146 if (priv->pd == NULL) {
147 DRV_LOG(ERR, "Failed to allocate PD.");
148 return errno ? -errno : -ENOMEM;
150 obj.pd.in = priv->pd;
151 obj.pd.out = &pd_info;
152 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
154 DRV_LOG(ERR, "Fail to get PD object info.");
155 mlx5_glue->dealloc_pd(priv->pd);
159 priv->pdn = pd_info.pdn; /* PD number used by DevX objects. */
163 DRV_LOG(ERR, "Cannot get pdn - no DV support.");
165 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
/*
 * Acquire the per-device global HW resources: the Protection Domain and a
 * DevX UAR (with its register address validated).  On UAR failure the PD
 * allocated just before is released.  Counterpart of
 * mlx5_compress_hw_global_release().
 * NOTE(review): return statements are not visible in this excerpt.
 */
169 mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
171 if (mlx5_compress_pd_create(priv) != 0)
173 priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
/* A UAR without a valid register address is unusable for doorbells. */
174 if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
177 claim_zero(mlx5_glue->dealloc_pd(priv->pd));
178 DRV_LOG(ERR, "Failed to allocate UAR.");
185 * DPDK callback to register a PCI device.
187 * This function spawns compress device out of a given PCI device.
190 * PCI driver structure (mlx5_compress_driver).
192 * PCI device information.
195 * 0 on success, 1 to skip this driver, a negative errno value otherwise
196 * and rte_errno is set.
199 mlx5_compress_pci_probe(struct rte_pci_driver *pci_drv,
200 struct rte_pci_device *pci_dev)
202 struct ibv_device *ibv;
203 struct rte_compressdev *cdev;
204 struct ibv_context *ctx;
205 struct mlx5_compress_priv *priv;
206 struct mlx5_hca_attr att = { 0 };
207 struct rte_compressdev_pmd_init_params init_params = {
209 .socket_id = pci_dev->device.numa_node,
212 RTE_SET_USED(pci_drv);
/* Only the primary process may create the device. */
213 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
214 DRV_LOG(ERR, "Non-primary process type is not supported.");
/* Map the PCI address to its verbs device. */
218 ibv = mlx5_compress_get_ib_device_match(&pci_dev->addr);
220 DRV_LOG(ERR, "No matching IB device for PCI slot "
221 PCI_PRI_FMT ".", pci_dev->addr.domain,
222 pci_dev->addr.bus, pci_dev->addr.devid,
223 pci_dev->addr.function);
226 DRV_LOG(INFO, "PCI information matches for device \"%s\".", ibv->name);
227 ctx = mlx5_glue->dv_open_device(ibv);
229 DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
/* The device must support the compress, decompress and DMA MMO
 * engines; otherwise the FW/OFED is too old for this driver. */
233 if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
234 att.mmo_compress_en == 0 || att.mmo_decompress_en == 0 ||
235 att.mmo_dma_en == 0) {
236 DRV_LOG(ERR, "Not enough capabilities to support compress "
237 "operations, maybe old FW/OFED version?");
238 claim_zero(mlx5_glue->close_device(ctx));
242 cdev = rte_compressdev_pmd_create(ibv->name, &pci_dev->device,
243 sizeof(*priv), &init_params);
245 DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
246 claim_zero(mlx5_glue->close_device(ctx));
250 "Compress device %s was created successfully.", ibv->name);
251 cdev->dev_ops = &mlx5_compress_ops;
/* Data-path entry points not implemented yet at this stage. */
252 cdev->dequeue_burst = NULL;
253 cdev->enqueue_burst = NULL;
254 cdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
255 priv = cdev->data->dev_private;
257 priv->pci_dev = pci_dev;
259 priv->min_block_size = att.compress_min_block_size;
/* Acquire PD + UAR; on failure roll back the compressdev and context. */
260 if (mlx5_compress_hw_global_prepare(priv) != 0) {
261 rte_compressdev_pmd_destroy(priv->cdev);
262 claim_zero(mlx5_glue->close_device(priv->ctx));
/* Publish the new device on the global list under the lock. */
265 pthread_mutex_lock(&priv_list_lock);
266 TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
267 pthread_mutex_unlock(&priv_list_lock);
272 * DPDK callback to remove a PCI device.
274 * This function removes all compress devices belong to a given PCI device.
277 * Pointer to the PCI device.
280 * 0 on success, the function cannot fail.
283 mlx5_compress_pci_remove(struct rte_pci_device *pdev)
285 struct mlx5_compress_priv *priv = NULL;
/* Find the private context matching this PCI address, unlink it under
 * the lock, then tear it down outside the critical section. */
287 pthread_mutex_lock(&priv_list_lock);
288 TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
289 if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) != 0)
292 TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
293 pthread_mutex_unlock(&priv_list_lock);
/* Release HW resources, destroy the compressdev, close the context. */
295 mlx5_compress_hw_global_release(priv);
296 rte_compressdev_pmd_destroy(priv->cdev);
297 claim_zero(mlx5_glue->close_device(priv->ctx));
/* PCI id table: only the ConnectX-6 Dx BlueField variant is matched in
 * this excerpt (table terminator not visible). */
302 static const struct rte_pci_id mlx5_compress_pci_id_map[] = {
304 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
305 PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
/*
 * mlx5 common PCI driver descriptor.  Registered with the shared mlx5 PCI
 * layer (class COMPRESS) so multiple mlx5 class drivers can share one
 * PCI device.
 */
312 static struct mlx5_pci_driver mlx5_compress_driver = {
313 .driver_class = MLX5_CLASS_COMPRESS,
316 .name = RTE_STR(MLX5_COMPRESS_DRIVER_NAME),
318 .id_table = mlx5_compress_pci_id_map,
319 .probe = mlx5_compress_pci_probe,
320 .remove = mlx5_compress_pci_remove,
/* Constructor: register the driver with the mlx5 common PCI layer, but
 * only when the glue (rdma-core) library was successfully loaded. */
325 RTE_INIT(rte_mlx5_compress_init)
328 if (mlx5_glue != NULL)
329 mlx5_pci_driver_register(&mlx5_compress_driver);
/* Register the log type, export the PMD name, its PCI id table and the
 * kernel-module dependencies for tooling (dpdk-pmdinfo etc.). */
332 RTE_LOG_REGISTER(mlx5_compress_logtype, MLX5_COMPRESS_LOG_NAME, NOTICE)
333 RTE_PMD_EXPORT_NAME(MLX5_COMPRESS_DRIVER_NAME, __COUNTER__);
334 RTE_PMD_REGISTER_PCI_TABLE(MLX5_COMPRESS_DRIVER_NAME, mlx5_compress_pci_id_map);
335 RTE_PMD_REGISTER_KMOD_DEP(MLX5_COMPRESS_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");