/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_spinlock.h>
#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_compressdev_pmd.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_pci.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_common_mr.h>
#include <mlx5_prm.h>

#include "mlx5_compress_utils.h"

#define MLX5_COMPRESS_DRIVER_NAME mlx5_compress
#define MLX5_COMPRESS_LOG_NAME pmd.compress.mlx5
#define MLX5_COMPRESS_MAX_QPS 1024
#define MLX5_COMP_MAX_WIN_SIZE_CONF 6u

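/*
 * Per-xform context: the WQE opcode and the big-endian GGA control word
 * are precomputed once at xform creation and copied into every WQE.
 */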
struct mlx5_compress_xform {
	LIST_ENTRY(mlx5_compress_xform) next;
	enum rte_comp_xform_type type;
	enum rte_comp_checksum_type csum_type;
	uint32_t opcode;
	uint32_t gga_ctrl1; /* BE. */
};

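/* Per-device private data, shared by all queue pairs of the device. */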
struct mlx5_compress_priv {
	TAILQ_ENTRY(mlx5_compress_priv) next;
	struct ibv_context *ctx; /* Device context. */
	struct rte_pci_device *pci_dev;
	struct rte_compressdev *cdev;
	void *uar;
	uint32_t pdn; /* Protection Domain number. */
	uint8_t min_block_size;
	/* Minimum huffman block size supported by the device. */
	struct ibv_pd *pd;
	struct rte_compressdev_config dev_config;
	LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
	rte_spinlock_t xform_sl;
	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
};

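/* Queue pair context: a DevX SQ/CQ pair plus the opaque result buffer MR. */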
struct mlx5_compress_qp {
	uint16_t qp_id;
	uint16_t entries_n;
	uint16_t pi;
	uint16_t ci;
	volatile uint64_t *uar_addr;
	struct mlx5_mr_ctrl mr_ctrl;
	int socket_id;
	struct mlx5_devx_cq cq;
	struct mlx5_devx_sq sq;
	struct mlx5_pmd_mr opaque_mr;
	struct rte_comp_op **ops;
	struct mlx5_compress_priv *priv;
	struct rte_compressdev_stats stats;
};

TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

int mlx5_compress_logtype;

const struct rte_compressdev_capabilities mlx5_caps[RTE_COMP_ALGO_LIST_END];

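/* Report the device information: QP limit, HW flag and capability array. */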
static void
mlx5_compress_dev_info_get(struct rte_compressdev *dev,
			   struct rte_compressdev_info *info)
{
	RTE_SET_USED(dev);
	if (info != NULL) {
		info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS;
		info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
		info->capabilities = mlx5_caps;
	}
}

static int
mlx5_compress_dev_configure(struct rte_compressdev *dev,
			    struct rte_compressdev_config *config)
{
	struct mlx5_compress_priv *priv;

	if (dev == NULL || config == NULL)
		return -EINVAL;
	priv = dev->data->dev_private;
	priv->dev_config = *config;
	return 0;
}

static int
mlx5_compress_dev_close(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_compress_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp->sq.sq != NULL)
		mlx5_devx_sq_destroy(&qp->sq);
	if (qp->cq.cq != NULL)
		mlx5_devx_cq_destroy(&qp->cq);
	if (qp->opaque_mr.obj != NULL) {
		void *opaq = qp->opaque_mr.addr;

		mlx5_common_verbs_dereg_mr(&qp->opaque_mr);
		if (opaq != NULL)
			rte_free(opaq);
	}
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

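/*
 * Initialize the constant part of all WQEs in the SQ: data segment count,
 * completion mode and the per-entry GGA opaque buffer address and lkey.
 */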
static void
mlx5_compress_init_sq(struct mlx5_compress_qp *qp)
{
	volatile struct mlx5_gga_wqe *restrict wqe =
				    (volatile struct mlx5_gga_wqe *)qp->sq.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	const uint32_t sq_ds = rte_cpu_to_be_32((qp->sq.sq->id << 8) | 4u);
	const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					MLX5_COMP_MODE_OFFSET);
	const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->opaque_mr.lkey);
	int i;

	/* These fields stay constant across all WQEs. */
	for (i = 0; i < qp->entries_n; ++i, ++wqe) {
		wqe->sq_ds = sq_ds;
		wqe->flags = flags;
		wqe->opaque_lkey = opaq_lkey;
		wqe->opaque_vaddr = rte_cpu_to_be_64
						((uint64_t)(uintptr_t)&opaq[i]);
	}
}

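/*
 * Create a queue pair: the qp context and the ops ring share one
 * cache-aligned allocation, the GGA opaque buffer is registered as an MR,
 * and a DevX CQ/SQ pair is created and moved to the ready state.
 */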
static int
mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		       uint32_t max_inflight_ops, int socket_id)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_qp *qp;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
	};
	struct mlx5_devx_create_sq_attr sq_attr = {
		.user_index = qp_id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = priv->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(priv->uar),
		},
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	uint32_t log_ops_n = rte_log2_u32(max_inflight_ops);
	uint32_t alloc_size = sizeof(*qp);
	void *opaq_buf;
	int ret;

	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += sizeof(struct rte_comp_op *) * (1u << log_ops_n);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate qp memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	dev->data->queue_pairs[qp_id] = qp;
	opaq_buf = rte_calloc(__func__, 1u << log_ops_n,
			      sizeof(struct mlx5_gga_compress_opaque),
			      sizeof(struct mlx5_gga_compress_opaque));
	if (opaq_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate opaque memory.");
		rte_errno = ENOMEM;
		goto err;
	}
	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
			       priv->dev_config.socket_id)) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto err;
	}
	qp->entries_n = 1 << log_ops_n;
	qp->socket_id = socket_id;
	qp->qp_id = qp_id;
	qp->priv = priv;
	qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
						   RTE_CACHE_LINE_SIZE);
	qp->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
	MLX5_ASSERT(qp->uar_addr);
	if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
				     sizeof(struct mlx5_gga_compress_opaque),
				     &qp->opaque_mr) != 0) {
		rte_free(opaq_buf);
		DRV_LOG(ERR, "Failed to register opaque MR.");
		rte_errno = ENOMEM;
		goto err;
	}
	ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto err;
	}
	sq_attr.cqn = qp->cq.cq->id;
	ret = mlx5_devx_sq_create(priv->ctx, &qp->sq, log_ops_n, &sq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create SQ.");
		goto err;
	}
	mlx5_compress_init_sq(qp);
	ret = mlx5_devx_cmd_modify_sq(qp->sq.sq, &modify_attr);
	if (ret != 0) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		goto err;
	}
	DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u",
		(uint32_t)qp_id, qp->sq.sq->id, qp->cq.cq->id, qp->entries_n);
	return 0;
err:
	mlx5_compress_qp_release(dev, qp_id);
	return -1;
}

static int
mlx5_compress_xform_free(struct rte_compressdev *dev, void *xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;

	rte_spinlock_lock(&priv->xform_sl);
	LIST_REMOVE((struct mlx5_compress_xform *)xform, next);
	rte_spinlock_unlock(&priv->xform_sl);
	rte_free(xform);
	return 0;
}

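/*
 * Validate the xform and precompute the WQE opcode and GGA control word
 * (window size, block size and dynamic block size for deflate).
 */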
static int
mlx5_compress_xform_create(struct rte_compressdev *dev,
			   const struct rte_comp_xform *xform,
			   void **private_xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_xform *xfrm;
	uint32_t size;

	if (xform->type == RTE_COMP_COMPRESS && xform->compress.level ==
							  RTE_COMP_LEVEL_NONE) {
		DRV_LOG(ERR, "Non-compressed block is not supported.");
		return -ENOTSUP;
	}
	if ((xform->type == RTE_COMP_COMPRESS && xform->compress.hash_algo !=
	     RTE_COMP_HASH_ALGO_NONE) || (xform->type == RTE_COMP_DECOMPRESS &&
		      xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE)) {
		DRV_LOG(ERR, "SHA is not supported.");
		return -ENOTSUP;
	}
	xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0,
				  priv->dev_config.socket_id);
	if (xfrm == NULL)
		return -ENOMEM;
	xfrm->opcode = MLX5_OPCODE_MMO;
	xfrm->type = xform->type;
	switch (xform->type) {
	case RTE_COMP_COMPRESS:
		switch (xform->compress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			size = 1 << xform->compress.window_size;
			size /= MLX5_GGA_COMP_WIN_SIZE_UNITS;
			xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size),
						 MLX5_COMP_MAX_WIN_SIZE_CONF) <<
						   WQE_GGA_COMP_WIN_SIZE_OFFSET;
			if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
				size = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
			else
				size = priv->min_block_size - 1 +
							  xform->compress.level;
			xfrm->gga_ctrl1 += RTE_MIN(size,
					   MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX) <<
						 WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
			xfrm->opcode += MLX5_OPC_MOD_MMO_COMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			size = xform->compress.deflate.huffman ==
						      RTE_COMP_HUFFMAN_DYNAMIC ?
					    MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX :
					     MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN;
			xfrm->gga_ctrl1 += size <<
					       WQE_GGA_COMP_DYNAMIC_SIZE_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->compress.chksum;
		break;
	case RTE_COMP_DECOMPRESS:
		switch (xform->decompress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DECOMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->decompress.chksum;
		break;
	default:
		DRV_LOG(ERR, "Algorithm %u is not supported.", xform->type);
		goto err;
	}
	DRV_LOG(DEBUG, "New xform: gga ctrl1 = 0x%08X opcode = 0x%08X csum "
		"type = %d.", xfrm->gga_ctrl1, xfrm->opcode, xfrm->csum_type);
	xfrm->gga_ctrl1 = rte_cpu_to_be_32(xfrm->gga_ctrl1);
	rte_spinlock_lock(&priv->xform_sl);
	LIST_INSERT_HEAD(&priv->xform_list, xfrm, next);
	rte_spinlock_unlock(&priv->xform_sl);
	*private_xform = xfrm;
	return 0;
err:
	rte_free(xfrm);
	return -ENOTSUP;
}

static int
mlx5_compress_dev_stop(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_compress_dev_start(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static void
mlx5_compress_stats_get(struct rte_compressdev *dev,
			struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_compress_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_compressdev_ops mlx5_compress_ops = {
	.dev_configure = mlx5_compress_dev_configure,
	.dev_start = mlx5_compress_dev_start,
	.dev_stop = mlx5_compress_dev_stop,
	.dev_close = mlx5_compress_dev_close,
	.dev_infos_get = mlx5_compress_dev_info_get,
	.stats_get = mlx5_compress_stats_get,
	.stats_reset = mlx5_compress_stats_reset,
	.queue_pair_setup = mlx5_compress_qp_setup,
	.queue_pair_release = mlx5_compress_qp_release,
	.private_xform_create = mlx5_compress_xform_create,
	.private_xform_free = mlx5_compress_xform_free,
	.stream_create = NULL,
	.stream_free = NULL,
};

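/*
 * Fill one WQE data segment from an mbuf, resolving the lkey through the
 * per-qp MR cache. Returns the lkey, or UINT32_MAX on resolution failure.
 */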
static __rte_always_inline uint32_t
mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
		       volatile struct mlx5_wqe_dseg *restrict dseg,
		       struct rte_mbuf *restrict mbuf,
		       uint32_t offset, uint32_t len)
{
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	dseg->bcount = rte_cpu_to_be_32(len);
	dseg->lkey = mlx5_mr_addr2mr_bh(qp->priv->pd, 0, &qp->priv->mr_scache,
					&qp->mr_ctrl, addr,
					!!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
	dseg->pbuf = rte_cpu_to_be_64(addr);
	return dseg->lkey;
}

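/*
 * Enqueue burst: build one GGA WQE per operation, then update the SQ
 * doorbell record and write the start of the last WQE to the UAR register
 * to ring the doorbell.
 */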
static uint16_t
mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
							      qp->sq.wqes, *wqe;
	struct mlx5_compress_xform *xform;
	struct rte_comp_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint16_t idx;
	bool invalid;

	if (remain < nb_ops)
		nb_ops = remain;
	else
		remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		wqe = &wqes[idx];
		rte_prefetch0(&wqes[(qp->pi + 1) & mask]);
		op = *ops++;
		xform = op->private_xform;
		/*
		 * Check operation arguments and error cases:
		 *   - Operation type must be state-less.
		 *   - Compress operation flush flag must be FULL or FINAL.
		 *   - Source and destination buffers must be mapped internally.
		 */
		invalid = op->op_type != RTE_COMP_OP_STATELESS ||
					    (xform->type == RTE_COMP_COMPRESS &&
					  op->flush_flag < RTE_COMP_FLUSH_FULL);
		if (unlikely(invalid ||
			     (mlx5_compress_dseg_set(qp, &wqe->gather,
						     op->m_src, op->src.offset,
						     op->src.length) ==
								  UINT32_MAX) ||
			     (mlx5_compress_dseg_set(qp, &wqe->scatter,
						     op->m_dst, op->dst.offset,
						  rte_pktmbuf_pkt_len(op->m_dst)
							    - op->dst.offset) ==
								 UINT32_MAX))) {
			op->status = invalid ? RTE_COMP_OP_STATUS_INVALID_ARGS :
						       RTE_COMP_OP_STATUS_ERROR;
			nb_ops -= remain;
			if (unlikely(nb_ops == 0))
				return 0;
			break;
		}
		wqe->gga_ctrl1 = xform->gga_ctrl1;
		wqe->opcode = rte_cpu_to_be_32(xform->opcode + (qp->pi << 8));
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	rte_io_wmb();
	qp->sq.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
	rte_wmb();
	*qp->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
	rte_wmb();
	return nb_ops;
}

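/* Dump the CQE, WQE and opaque buffer of a failed operation in hex. */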
static void
mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe,
			    volatile uint32_t *opaq)
{
	size_t i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < sizeof(struct mlx5_gga_wqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
	DRV_LOG(ERR, "\nError opaq:");
	for (i = 0; i < sizeof(struct mlx5_gga_compress_opaque) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", opaq[i], opaq[i + 1],
			opaq[i + 2], opaq[i + 3]);
}

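/* Complete an operation as failed and collect the error syndromes. */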
static void
mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,
			     struct rte_comp_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							      &qp->cq.cqes[idx];
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
								    qp->sq.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;

	op->status = RTE_COMP_OP_STATUS_ERROR;
	op->consumed = 0;
	op->produced = 0;
	op->output_chksum = 0;
	op->debug_status = rte_be_to_cpu_32(opaq[idx].syndrom) |
			      ((uint64_t)rte_be_to_cpu_32(cqe->syndrome) << 32);
	mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
				    (volatile uint32_t *)&wqes[idx],
				    (volatile uint32_t *)&opaq[idx]);
	qp->stats.dequeue_err_count++;
}

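/*
 * Dequeue burst: poll CQEs in order, complete the matching operations and
 * extract checksums from the opaque buffer, then update the CQ doorbell.
 */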
static uint16_t
mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_compress_xform *restrict xform;
	volatile struct mlx5_cqe *restrict cqe;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	struct rte_comp_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		rte_prefetch0(&qp->cq.cqes[next_idx]);
		rte_prefetch0(qp->ops[next_idx]);
		op = qp->ops[idx];
		cqe = &qp->cq.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_compress_cqe_err_handle(qp, op);
		} else {
			xform = op->private_xform;
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
			op->consumed = op->src.length;
			op->produced = rte_be_to_cpu_32(cqe->byte_cnt);
			MLX5_ASSERT(cqe->byte_cnt ==
				    opaq[idx].scattered_length);
			switch (xform->csum_type) {
			case RTE_COMP_CHECKSUM_CRC32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32);
				break;
			case RTE_COMP_CHECKSUM_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32;
				break;
			case RTE_COMP_CHECKSUM_CRC32_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
							     (opaq[idx].crc32) |
						    ((uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32);
				break;
			default:
				break;
			}
		}
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

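/* Find the Verbs device matching the given PCI address, if any. */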
static struct ibv_device *
mlx5_compress_get_ib_device_match(struct rte_pci_addr *addr)
{
	int n;
	struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
	struct ibv_device *ibv_match = NULL;

	if (ibv_list == NULL) {
		rte_errno = ENOSYS;
		return NULL;
	}
	while (n-- > 0) {
		struct rte_pci_addr paddr;

		DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
		if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &paddr) != 0)
			continue;
		if (rte_pci_addr_cmp(addr, &paddr) != 0)
			continue;
		ibv_match = ibv_list[n];
		break;
	}
	if (ibv_match == NULL)
		rte_errno = ENOENT;
	mlx5_glue->free_device_list(ibv_list);
	return ibv_match;
}

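/* Release the PD and UAR objects acquired at probe time. */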
static void
mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
{
	if (priv->pd != NULL) {
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		priv->pd = NULL;
	}
	if (priv->uar != NULL) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
}

static int
mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret;

	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
	if (priv->pd == NULL) {
		DRV_LOG(ERR, "Failed to allocate PD.");
		return errno ? -errno : -ENOMEM;
	}
	obj.pd.in = priv->pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get PD object info.");
		mlx5_glue->dealloc_pd(priv->pd);
		priv->pd = NULL;
		return -errno;
	}
	priv->pdn = pd_info.pdn;
	return 0;
#else
	(void)priv;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

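/* Allocate the PD and UAR required by all queue pairs of the device. */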
static int
mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
{
	if (mlx5_compress_pd_create(priv) != 0)
		return -1;
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
	    NULL) {
		rte_errno = errno;
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	return 0;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns a compress device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_compress_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_compress_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	struct ibv_device *ibv;
	struct rte_compressdev *cdev;
	struct ibv_context *ctx;
	struct mlx5_compress_priv *priv;
	struct mlx5_hca_attr att = { 0 };
	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = pci_dev->device.numa_node,
	};

	RTE_SET_USED(pci_drv);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ibv = mlx5_compress_get_ib_device_match(&pci_dev->addr);
	if (ibv == NULL) {
		DRV_LOG(ERR, "No matching IB device for PCI slot "
			PCI_PRI_FMT ".", pci_dev->addr.domain,
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
		return -rte_errno;
	}
	DRV_LOG(INFO, "PCI information matches for device \"%s\".", ibv->name);
	ctx = mlx5_glue->dv_open_device(ibv);
	if (ctx == NULL) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
	    att.mmo_compress_en == 0 || att.mmo_decompress_en == 0 ||
	    att.mmo_dma_en == 0) {
		DRV_LOG(ERR, "Not enough capabilities to support compress "
			"operations, maybe old FW/OFED version?");
		claim_zero(mlx5_glue->close_device(ctx));
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	cdev = rte_compressdev_pmd_create(ibv->name, &pci_dev->device,
					  sizeof(*priv), &init_params);
	if (cdev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
		claim_zero(mlx5_glue->close_device(ctx));
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Compress device %s was created successfully.", ibv->name);
	cdev->dev_ops = &mlx5_compress_ops;
	cdev->dequeue_burst = mlx5_compress_dequeue_burst;
	cdev->enqueue_burst = mlx5_compress_enqueue_burst;
	cdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	priv = cdev->data->dev_private;
	priv->ctx = ctx;
	priv->pci_dev = pci_dev;
	priv->cdev = cdev;
	priv->min_block_size = att.compress_min_block_size;
	if (mlx5_compress_hw_global_prepare(priv) != 0) {
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		return -1;
	}
	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
			       MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all compress devices belonging to a given PCI device.
 *
 * @param[in] pdev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_compress_pci_remove(struct rte_pci_device *pdev)
{
	struct mlx5_compress_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
		if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) == 0)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		mlx5_mr_release_cache(&priv->mr_scache);
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
	}
	return 0;
}

static const struct rte_pci_id mlx5_compress_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_pci_driver mlx5_compress_driver = {
	.driver_class = MLX5_CLASS_COMPRESS,
	.pci_driver = {
		.driver = {
			.name = RTE_STR(MLX5_COMPRESS_DRIVER_NAME),
		},
		.id_table = mlx5_compress_pci_id_map,
		.probe = mlx5_compress_pci_probe,
		.remove = mlx5_compress_pci_remove,
	},
};

RTE_INIT(rte_mlx5_compress_init)
{
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_pci_driver_register(&mlx5_compress_driver);
}

RTE_LOG_REGISTER(mlx5_compress_logtype, MLX5_COMPRESS_LOG_NAME, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_COMPRESS_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_COMPRESS_DRIVER_NAME, mlx5_compress_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_COMPRESS_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");