/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_bus_pci.h>
#include <rte_spinlock.h>
#include <rte_compressdev.h>
#include <rte_compressdev_pmd.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_common_mr.h>

#include "mlx5_compress_utils.h"

#define MLX5_COMPRESS_DRIVER_NAME mlx5_compress
#define MLX5_COMPRESS_MAX_QPS 1024
#define MLX5_COMP_MAX_WIN_SIZE_CONF 6u

struct mlx5_compress_devarg_params {
	uint32_t log_block_sz;
};

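/*
 * Shareable private transform. The WQE opcode and the big-endian gga_ctrl1
 * control word are pre-computed at xform creation time and copied verbatim
 * into every data-path WQE that uses this transform.
 */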
struct mlx5_compress_xform {
	LIST_ENTRY(mlx5_compress_xform) next;
	enum rte_comp_xform_type type;
	enum rte_comp_checksum_type csum_type;
	uint32_t opcode;
	uint32_t gga_ctrl1; /* BE. */
};

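/*
 * Per-device private data: the backend mlx5 common device, the UAR used for
 * doorbells, the list of created transforms and the MMO capability bits
 * reported by the firmware.
 */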
struct mlx5_compress_priv {
	TAILQ_ENTRY(mlx5_compress_priv) next;
	struct rte_compressdev *compressdev;
	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
	void *uar;
	uint8_t min_block_size;
	/* Minimum huffman block size supported by the device. */
	struct rte_compressdev_config dev_config;
	LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
	rte_spinlock_t xform_sl;
	volatile uint64_t *uar_addr;
	uint32_t mmo_decomp_sq:1;
	uint32_t mmo_decomp_qp:1;
	uint32_t mmo_comp_sq:1;
	uint32_t mmo_comp_qp:1;
	uint32_t mmo_dma_sq:1;
	uint32_t mmo_dma_qp:1;
	uint32_t log_block_sz;
#ifndef RTE_ARCH_64
	rte_spinlock_t uar32_sl;
#endif /* RTE_ARCH_64 */
};

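/*
 * Queue pair context: a DevX SQ/CQ pair, the registered opaque result buffer
 * the hardware writes into, the software ops ring with producer/consumer
 * indexes, the MR cache control and per-queue statistics.
 */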
struct mlx5_compress_qp {
	uint16_t qp_id;
	uint16_t entries_n;
	uint16_t pi;
	uint16_t ci;
	struct mlx5_mr_ctrl mr_ctrl;
	int socket_id;
	struct mlx5_devx_cq cq;
	struct mlx5_devx_qp qp;
	struct mlx5_pmd_mr opaque_mr;
	struct rte_comp_op **ops;
	struct mlx5_compress_priv *priv;
	struct rte_compressdev_stats stats;
};

TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

int mlx5_compress_logtype;

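/*
 * Capabilities advertised to the compressdev layer: NULL (DMA) and DEFLATE
 * algorithms, CRC32/Adler32 checksums, fixed and dynamic Huffman coding and
 * a log2 10-15 (1KB-32KB) window size.
 */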
static const struct rte_compressdev_capabilities mlx5_caps[] = {
	{
		.algo = RTE_COMP_ALGO_NULL,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
	},
	{
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				      RTE_COMP_FF_HUFFMAN_FIXED |
				      RTE_COMP_FF_HUFFMAN_DYNAMIC,
		.window_size = {.min = 10, .max = 15, .increment = 1},
	},
	{
		.algo = RTE_COMP_ALGO_LIST_END,
	}
};

static void
mlx5_compress_dev_info_get(struct rte_compressdev *dev,
			   struct rte_compressdev_info *info)
{
	RTE_SET_USED(dev);
	if (info != NULL) {
		info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS;
		info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
		info->capabilities = mlx5_caps;
	}
}

static int
mlx5_compress_dev_configure(struct rte_compressdev *dev,
			    struct rte_compressdev_config *config)
{
	struct mlx5_compress_priv *priv;

	if (dev == NULL || config == NULL)
		return -EINVAL;
	priv = dev->data->dev_private;
	priv->dev_config = *config;
	return 0;
}

static int
mlx5_compress_dev_close(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_compress_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp->qp.qp != NULL)
		mlx5_devx_qp_destroy(&qp->qp);
	if (qp->cq.cq != NULL)
		mlx5_devx_cq_destroy(&qp->cq);
	if (qp->opaque_mr.obj != NULL) {
		void *opaq = qp->opaque_mr.addr;

		mlx5_common_verbs_dereg_mr(&qp->opaque_mr);
		rte_free(opaq);
	}
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

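/*
 * Pre-fill the constant fields of every GGA WQE in the SQ: the DS count and
 * QP number, the "always generate CQE" completion flag and the address/lkey
 * of the per-entry opaque result buffer. The data path then only has to set
 * the per-operation fields.
 */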
static void
mlx5_compress_init_qp(struct mlx5_compress_qp *qp)
{
	volatile struct mlx5_gga_wqe *restrict wqe =
				    (volatile struct mlx5_gga_wqe *)qp->qp.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	const uint32_t sq_ds = rte_cpu_to_be_32((qp->qp.qp->id << 8) | 4u);
	const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					MLX5_COMP_MODE_OFFSET);
	const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->opaque_mr.lkey);
	int i;

	/* All the next fields state should stay constant. */
	for (i = 0; i < qp->entries_n; ++i, ++wqe) {
		wqe->sq_ds = sq_ds;
		wqe->flags = flags;
		wqe->opaque_lkey = opaq_lkey;
		wqe->opaque_vaddr = rte_cpu_to_be_64
						((uint64_t)(uintptr_t)&opaq[i]);
	}
}

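/*
 * Queue pair setup: allocate the QP context and the ops ring, register the
 * opaque result buffer as an MR, create the DevX CQ and QP sized to the
 * requested number of in-flight operations and move the QP to RTS state.
 */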
static int
mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		       uint32_t max_inflight_ops, int socket_id)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_qp *qp;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
	};
	struct mlx5_devx_qp_attr qp_attr = {
		.pd = priv->cdev->pdn,
		.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar),
	};
	uint32_t log_ops_n = rte_log2_u32(max_inflight_ops);
	uint32_t alloc_size = sizeof(*qp);
	void *opaq_buf;
	int ret;

	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += sizeof(struct rte_comp_op *) * (1u << log_ops_n);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate qp memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	dev->data->queue_pairs[qp_id] = qp;
	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
			      priv->dev_config.socket_id)) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto err;
	}
	opaq_buf = rte_calloc(__func__, (size_t)1 << log_ops_n,
			      sizeof(struct mlx5_gga_compress_opaque),
			      sizeof(struct mlx5_gga_compress_opaque));
	if (opaq_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate opaque memory.");
		rte_errno = ENOMEM;
		goto err;
	}
	qp->entries_n = 1 << log_ops_n;
	qp->socket_id = socket_id;
	qp->qp_id = qp_id;
	qp->priv = priv;
	qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
						   RTE_CACHE_LINE_SIZE);
	if (mlx5_common_verbs_reg_mr(priv->cdev->pd, opaq_buf, qp->entries_n *
					sizeof(struct mlx5_gga_compress_opaque),
							&qp->opaque_mr) != 0) {
		rte_free(opaq_buf);
		DRV_LOG(ERR, "Failed to register opaque MR.");
		rte_errno = ENOMEM;
		goto err;
	}
	ret = mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq, log_ops_n, &cq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto err;
	}
	qp_attr.cqn = qp->cq.cq->id;
	qp_attr.ts_format =
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	qp_attr.sq_size = RTE_BIT32(log_ops_n);
	qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
			&& priv->mmo_dma_qp;
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp, log_ops_n, &qp_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create QP.");
		goto err;
	}
	mlx5_compress_init_qp(qp);
	ret = mlx5_devx_qp2rts(&qp->qp, 0);
	if (ret)
		goto err;
	DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u",
		(uint32_t)qp_id, qp->qp.qp->id, qp->cq.cq->id, qp->entries_n);
	return 0;
err:
	mlx5_compress_qp_release(dev, qp_id);
	return -1;
}

static int
mlx5_compress_xform_free(struct rte_compressdev *dev, void *xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;

	rte_spinlock_lock(&priv->xform_sl);
	LIST_REMOVE((struct mlx5_compress_xform *)xform, next);
	rte_spinlock_unlock(&priv->xform_sl);
	rte_free(xform);
	return 0;
}

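/*
 * Validate the transform against the device MMO capabilities and translate it
 * into the WQE opcode (DMA, compress or decompress MMO) and the gga_ctrl1
 * control word (window size, block size and dynamic block size for DEFLATE).
 * The resulting structure is shareable between queue pairs.
 */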
static int
mlx5_compress_xform_create(struct rte_compressdev *dev,
			   const struct rte_comp_xform *xform,
			   void **private_xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_xform *xfrm;
	uint32_t size;

	switch (xform->type) {
	case RTE_COMP_COMPRESS:
		if (xform->compress.algo == RTE_COMP_ALGO_NULL &&
				!priv->mmo_dma_qp && !priv->mmo_dma_sq) {
			DRV_LOG(ERR, "Not enough capabilities to support DMA operation, maybe old FW/OFED version?");
			return -ENOTSUP;
		} else if (!priv->mmo_comp_qp && !priv->mmo_comp_sq) {
			DRV_LOG(ERR, "Not enough capabilities to support compress operation, maybe old FW/OFED version?");
			return -ENOTSUP;
		}
		if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
			DRV_LOG(ERR, "Non-compressed block is not supported.");
			return -ENOTSUP;
		}
		if (xform->compress.hash_algo != RTE_COMP_HASH_ALGO_NONE) {
			DRV_LOG(ERR, "SHA is not supported.");
			return -ENOTSUP;
		}
		break;
	case RTE_COMP_DECOMPRESS:
		if (xform->decompress.algo == RTE_COMP_ALGO_NULL &&
				!priv->mmo_dma_qp && !priv->mmo_dma_sq) {
			DRV_LOG(ERR, "Not enough capabilities to support DMA operation, maybe old FW/OFED version?");
			return -ENOTSUP;
		} else if (!priv->mmo_decomp_qp && !priv->mmo_decomp_sq) {
			DRV_LOG(ERR, "Not enough capabilities to support decompress operation, maybe old FW/OFED version?");
			return -ENOTSUP;
		}
		if (xform->compress.hash_algo != RTE_COMP_HASH_ALGO_NONE) {
			DRV_LOG(ERR, "SHA is not supported.");
			return -ENOTSUP;
		}
		break;
	default:
		DRV_LOG(ERR, "Xform type should be compress/decompress");
		return -ENOTSUP;
	}
	xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0,
				  priv->dev_config.socket_id);
	if (xfrm == NULL)
		return -ENOMEM;
	xfrm->opcode = MLX5_OPCODE_MMO;
	xfrm->type = xform->type;
	switch (xform->type) {
	case RTE_COMP_COMPRESS:
		switch (xform->compress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			size = 1 << xform->compress.window_size;
			size /= MLX5_GGA_COMP_WIN_SIZE_UNITS;
			xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size),
					   MLX5_COMP_MAX_WIN_SIZE_CONF) <<
						WQE_GGA_COMP_WIN_SIZE_OFFSET;
			size = priv->log_block_sz;
			xfrm->gga_ctrl1 += size <<
						WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
			xfrm->opcode += MLX5_OPC_MOD_MMO_COMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			size = xform->compress.deflate.huffman ==
						      RTE_COMP_HUFFMAN_DYNAMIC ?
					    MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX :
					     MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN;
			xfrm->gga_ctrl1 += size <<
					       WQE_GGA_COMP_DYNAMIC_SIZE_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->compress.chksum;
		break;
	case RTE_COMP_DECOMPRESS:
		switch (xform->decompress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DECOMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->decompress.chksum;
		break;
	default:
		DRV_LOG(ERR, "Algorithm %u is not supported.", xform->type);
		goto err;
	}
	DRV_LOG(DEBUG, "New xform: gga ctrl1 = 0x%08X opcode = 0x%08X csum "
		"type = %d.", xfrm->gga_ctrl1, xfrm->opcode, xfrm->csum_type);
	xfrm->gga_ctrl1 = rte_cpu_to_be_32(xfrm->gga_ctrl1);
	rte_spinlock_lock(&priv->xform_sl);
	LIST_INSERT_HEAD(&priv->xform_list, xfrm, next);
	rte_spinlock_unlock(&priv->xform_sl);
	*private_xform = xfrm;
	return 0;
err:
	rte_free(xfrm);
	return -ENOTSUP;
}

static void
mlx5_compress_dev_stop(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
}

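/*
 * Device start only subscribes the device to mempool events so that the
 * application mbuf pools get registered in the MR cache.
 */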
static int
mlx5_compress_dev_start(struct rte_compressdev *dev)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;

	return mlx5_dev_mempool_subscribe(priv->cdev);
}

static void
mlx5_compress_stats_get(struct rte_compressdev *dev,
			struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_compress_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_compressdev_ops mlx5_compress_ops = {
	.dev_configure = mlx5_compress_dev_configure,
	.dev_start = mlx5_compress_dev_start,
	.dev_stop = mlx5_compress_dev_stop,
	.dev_close = mlx5_compress_dev_close,
	.dev_infos_get = mlx5_compress_dev_info_get,
	.stats_get = mlx5_compress_stats_get,
	.stats_reset = mlx5_compress_stats_reset,
	.queue_pair_setup = mlx5_compress_qp_setup,
	.queue_pair_release = mlx5_compress_qp_release,
	.private_xform_create = mlx5_compress_xform_create,
	.private_xform_free = mlx5_compress_xform_free,
	.stream_create = NULL,
	.stream_free = NULL,
};

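/*
 * Fill a WQE data segment (byte count, lkey and buffer address) from an mbuf.
 * The lkey is resolved through the per-queue MR cache and is also returned,
 * so the caller can detect a failed MR lookup.
 */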
static __rte_always_inline uint32_t
mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
		       volatile struct mlx5_wqe_dseg *restrict dseg,
		       struct rte_mbuf *restrict mbuf,
		       uint32_t offset, uint32_t len)
{
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	dseg->bcount = rte_cpu_to_be_32(len);
	dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf);
	dseg->pbuf = rte_cpu_to_be_64(addr);
	return dseg->lkey;
}

/*
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures.
 */
static __rte_always_inline void
mlx5_compress_uar_write(uint64_t val, struct mlx5_compress_priv *priv)
{
#ifdef RTE_ARCH_64
	*priv->uar_addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(&priv->uar32_sl);
	*(volatile uint32_t *)priv->uar_addr = val;
	rte_io_wmb();
	*((volatile uint32_t *)priv->uar_addr + 1) = val >> 32;
	rte_spinlock_unlock(&priv->uar32_sl);
#endif
}

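/*
 * Enqueue burst: for each op build a GGA WQE (source gather segment,
 * destination scatter segment, control word and opcode), then ring the SQ
 * doorbell once for the whole burst through the UAR.
 */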
static uint16_t
mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
							      qp->qp.wqes, *wqe;
	struct mlx5_compress_xform *xform;
	struct rte_comp_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint16_t idx;
	bool invalid;

	if (remain < nb_ops)
		nb_ops = remain;
	else
		remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		wqe = &wqes[idx];
		rte_prefetch0(&wqes[(qp->pi + 1) & mask]);
		op = *ops++;
		xform = op->private_xform;
		/*
		 * Check operation arguments and error cases:
		 *   - Operation type must be state-less.
		 *   - Compress operation flush flag must be FULL or FINAL.
		 *   - Source and destination buffers must be mapped internally.
		 */
		invalid = op->op_type != RTE_COMP_OP_STATELESS ||
					    (xform->type == RTE_COMP_COMPRESS &&
					  op->flush_flag < RTE_COMP_FLUSH_FULL);
		if (unlikely(invalid ||
			     (mlx5_compress_dseg_set(qp, &wqe->gather,
						     op->m_src,
						     op->src.offset,
						     op->src.length) ==
								  UINT32_MAX) ||
			     (mlx5_compress_dseg_set(qp, &wqe->scatter,
						op->m_dst,
						op->dst.offset,
						rte_pktmbuf_pkt_len(op->m_dst) -
							      op->dst.offset) ==
								 UINT32_MAX))) {
			op->status = invalid ? RTE_COMP_OP_STATUS_INVALID_ARGS :
					       RTE_COMP_OP_STATUS_ERROR;
			nb_ops -= remain;
			if (unlikely(nb_ops == 0))
				return 0;
			break;
		}
		wqe->gga_ctrl1 = xform->gga_ctrl1;
		wqe->opcode = rte_cpu_to_be_32(xform->opcode + (qp->pi << 8));
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	rte_io_wmb();
	qp->qp.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
	rte_wmb();
	mlx5_compress_uar_write(*(volatile uint64_t *)wqe, qp->priv);
	rte_wmb();
	return nb_ops;
}

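/* Dump the error CQE, the failing WQE and its opaque buffer as hex words. */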
static void
mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe,
			    volatile uint32_t *opaq)
{
	size_t i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < sizeof(struct mlx5_gga_wqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
	DRV_LOG(ERR, "\nError opaq:");
	for (i = 0; i < sizeof(struct mlx5_gga_compress_opaque) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", opaq[i], opaq[i + 1],
			opaq[i + 2], opaq[i + 3]);
}

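/*
 * Error completion handler: mark the op as failed, combine the opaque and CQE
 * syndromes into debug_status and dump the offending objects.
 */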
static void
mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,
			     struct rte_comp_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							      &qp->cq.cqes[idx];
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
								    qp->qp.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;

	op->status = RTE_COMP_OP_STATUS_ERROR;
	op->consumed = 0;
	op->produced = 0;
	op->output_chksum = 0;
	op->debug_status = rte_be_to_cpu_32(opaq[idx].syndrom) |
			      ((uint64_t)rte_be_to_cpu_32(cqe->syndrome) << 32);
	mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
				    (volatile uint32_t *)&wqes[idx],
				    (volatile uint32_t *)&opaq[idx]);
	qp->stats.dequeue_err_count++;
}

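/*
 * Dequeue burst: poll the CQ for completed operations, read the produced byte
 * count from the CQE and the requested checksums from the opaque buffer, then
 * update the CQ doorbell record with the new consumer index.
 */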
static uint16_t
mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_compress_xform *restrict xform;
	volatile struct mlx5_cqe *restrict cqe;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	struct rte_comp_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		rte_prefetch0(&qp->cq.cqes[next_idx]);
		rte_prefetch0(qp->ops[next_idx]);
		op = qp->ops[idx];
		cqe = &qp->cq.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_compress_cqe_err_handle(qp, op);
		} else {
			xform = op->private_xform;
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
			op->consumed = op->src.length;
			op->produced = rte_be_to_cpu_32(cqe->byte_cnt);
			MLX5_ASSERT(cqe->byte_cnt ==
				    opaq[idx].scattered_length);
			switch (xform->csum_type) {
			case RTE_COMP_CHECKSUM_CRC32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32);
				break;
			case RTE_COMP_CHECKSUM_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32;
				break;
			case RTE_COMP_CHECKSUM_CRC32_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
							     (opaq[idx].crc32) |
						     ((uint64_t)rte_be_to_cpu_32
						     (opaq[idx].adler32) << 32);
				break;
			default:
				break;
			}
		}
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

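/*
 * UAR setup/teardown: the UAR register is the target of the SQ doorbell
 * writes issued at enqueue time.
 */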
static void
mlx5_compress_uar_release(struct mlx5_compress_priv *priv)
{
	if (priv->uar != NULL) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
}

static int
mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)
{
	priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
	if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
	    NULL) {
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
	MLX5_ASSERT(priv->uar_addr);
#ifndef RTE_ARCH_64
	rte_spinlock_init(&priv->uar32_sl);
#endif /* RTE_ARCH_64 */
	return 0;
}

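/* Devargs handler: parse the "log-block-size" argument into the parameters. */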
static int
mlx5_compress_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_compress_devarg_params *devarg_prms = opaque;

	if (strcmp(key, "log-block-size") == 0) {
		errno = 0;
		devarg_prms->log_block_sz = (uint32_t)strtoul(val, NULL, 10);
		if (errno) {
			DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer."
				, key, val);
			return -errno;
		}
	}
	return 0;
}

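/*
 * Parse device arguments: the compression block size defaults to the maximum
 * supported log size and is reset to that maximum if the provided value is
 * out of the device range.
 */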
static int
mlx5_compress_handle_devargs(struct rte_devargs *devargs,
			     struct mlx5_compress_devarg_params *devarg_prms,
			     struct mlx5_hca_attr *att)
{
	struct rte_kvargs *kvlist;

	devarg_prms->log_block_sz = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		DRV_LOG(ERR, "Failed to parse devargs.");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_kvargs_process(kvlist, NULL, mlx5_compress_args_check_handler,
			       devarg_prms) != 0) {
		DRV_LOG(ERR, "Devargs handler function Failed.");
		rte_kvargs_free(kvlist);
		rte_errno = EINVAL;
		return -1;
	}
	rte_kvargs_free(kvlist);
	if (devarg_prms->log_block_sz > MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX ||
		devarg_prms->log_block_sz < att->compress_min_block_size) {
		DRV_LOG(WARNING, "Log block size provided is out of range("
			"%u); default it to %u.",
			devarg_prms->log_block_sz,
			MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX);
		devarg_prms->log_block_sz = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
	}
	return 0;
}

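/*
 * Probe entry point: refuse secondary processes and devices without any MMO
 * capability, parse devargs, create the compressdev, copy the capability bits
 * into the private data and prepare the doorbell UAR.
 */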
static int
mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
{
	struct rte_compressdev *compressdev;
	struct mlx5_compress_priv *priv;
	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
	struct mlx5_compress_devarg_params devarg_prms = {0};
	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = cdev->dev->numa_node,
	};
	const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (!attr->mmo_decompress_qp_en && !attr->mmo_decompress_sq_en
		&& !attr->mmo_compress_qp_en && !attr->mmo_compress_sq_en
		&& !attr->mmo_dma_qp_en && !attr->mmo_dma_sq_en) {
		DRV_LOG(ERR, "Not enough capabilities to support compress operations, maybe old FW/OFED version?");
		claim_zero(mlx5_glue->close_device(cdev->ctx));
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	mlx5_compress_handle_devargs(cdev->dev->devargs, &devarg_prms, attr);
	compressdev = rte_compressdev_pmd_create(ibdev_name, cdev->dev,
						 sizeof(*priv), &init_params);
	if (compressdev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name);
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Compress device %s was created successfully.", ibdev_name);
	compressdev->dev_ops = &mlx5_compress_ops;
	compressdev->dequeue_burst = mlx5_compress_dequeue_burst;
	compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	priv = compressdev->data->dev_private;
	priv->log_block_sz = devarg_prms.log_block_sz;
	priv->mmo_decomp_sq = attr->mmo_decompress_sq_en;
	priv->mmo_decomp_qp = attr->mmo_decompress_qp_en;
	priv->mmo_comp_sq = attr->mmo_compress_sq_en;
	priv->mmo_comp_qp = attr->mmo_compress_qp_en;
	priv->mmo_dma_sq = attr->mmo_dma_sq_en;
	priv->mmo_dma_qp = attr->mmo_dma_qp_en;
	priv->cdev = cdev;
	priv->compressdev = compressdev;
	priv->min_block_size = attr->compress_min_block_size;
	if (mlx5_compress_uar_prepare(priv) != 0) {
		rte_compressdev_pmd_destroy(priv->compressdev);
		return -1;
	}
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

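/*
 * Remove entry point: find the private data matching the removed device,
 * unlink it from the global list, release the UAR and destroy the
 * compressdev.
 */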
static int
mlx5_compress_dev_remove(struct mlx5_common_device *cdev)
{
	struct mlx5_compress_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
		if (priv->compressdev->device == cdev->dev)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		mlx5_compress_uar_release(priv);
		rte_compressdev_pmd_destroy(priv->compressdev);
	}
	return 0;
}

static const struct rte_pci_id mlx5_compress_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_class_driver mlx5_compress_driver = {
	.drv_class = MLX5_CLASS_COMPRESS,
	.name = RTE_STR(MLX5_COMPRESS_DRIVER_NAME),
	.id_table = mlx5_compress_pci_id_map,
	.probe = mlx5_compress_dev_probe,
	.remove = mlx5_compress_dev_remove,
};

RTE_INIT(rte_mlx5_compress_init)
{
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_class_driver_register(&mlx5_compress_driver);
}

RTE_LOG_REGISTER_DEFAULT(mlx5_compress_logtype, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_COMPRESS_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_COMPRESS_DRIVER_NAME, mlx5_compress_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_COMPRESS_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");