/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_spinlock.h>
#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_compressdev_pmd.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_pci.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_common_mr.h>
#include <mlx5_prm.h>

#include "mlx5_compress_utils.h"

#define MLX5_COMPRESS_DRIVER_NAME mlx5_compress
#define MLX5_COMPRESS_LOG_NAME pmd.compress.mlx5
#define MLX5_COMPRESS_MAX_QPS 1024
#define MLX5_COMP_MAX_WIN_SIZE_CONF 6u

struct mlx5_compress_xform {
	LIST_ENTRY(mlx5_compress_xform) next;
	enum rte_comp_xform_type type;
	enum rte_comp_checksum_type csum_type;
	uint32_t opcode;
	uint32_t gga_ctrl1; /* BE. */
};

struct mlx5_compress_priv {
	TAILQ_ENTRY(mlx5_compress_priv) next;
	struct ibv_context *ctx; /* Device context. */
	struct rte_pci_device *pci_dev;
	struct rte_compressdev *cdev;
	void *uar;
	uint32_t pdn; /* Protection Domain number. */
	uint8_t min_block_size;
	/* Minimum huffman block size supported by the device. */
	struct ibv_pd *pd;
	struct rte_compressdev_config dev_config;
	LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
	rte_spinlock_t xform_sl;
	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
	volatile uint64_t *uar_addr;
#ifndef RTE_ARCH_64
	rte_spinlock_t uar32_sl;
#endif /* RTE_ARCH_64 */
};

struct mlx5_compress_qp {
	uint16_t qp_id;
	uint16_t entries_n;
	uint16_t pi;
	uint16_t ci;
	struct mlx5_mr_ctrl mr_ctrl;
	int socket_id;
	struct mlx5_devx_cq cq;
	struct mlx5_devx_sq sq;
	struct mlx5_pmd_mr opaque_mr;
	struct rte_comp_op **ops;
	struct mlx5_compress_priv *priv;
	struct rte_compressdev_stats stats;
};

TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

int mlx5_compress_logtype;

static const struct rte_compressdev_capabilities mlx5_caps[] = {
	{
		.algo = RTE_COMP_ALGO_NULL,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
	},
	{
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				      RTE_COMP_FF_HUFFMAN_FIXED |
				      RTE_COMP_FF_HUFFMAN_DYNAMIC,
		.window_size = {.min = 10, .max = 15, .increment = 1},
	},
	{
		.algo = RTE_COMP_ALGO_LIST_END,
	}
};

static void
mlx5_compress_dev_info_get(struct rte_compressdev *dev,
			   struct rte_compressdev_info *info)
{
	RTE_SET_USED(dev);
	if (info != NULL) {
		info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS;
		info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
		info->capabilities = mlx5_caps;
	}
}

static int
mlx5_compress_dev_configure(struct rte_compressdev *dev,
			    struct rte_compressdev_config *config)
{
	struct mlx5_compress_priv *priv;

	if (dev == NULL || config == NULL)
		return -EINVAL;
	priv = dev->data->dev_private;
	priv->dev_config = *config;
	return 0;
}

static int
mlx5_compress_dev_close(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_compress_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp->sq.sq != NULL)
		mlx5_devx_sq_destroy(&qp->sq);
	if (qp->cq.cq != NULL)
		mlx5_devx_cq_destroy(&qp->cq);
	if (qp->opaque_mr.obj != NULL) {
		void *opaq = qp->opaque_mr.addr;

		mlx5_common_verbs_dereg_mr(&qp->opaque_mr);
		if (opaq != NULL)
			rte_free(opaq);
	}
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

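/* Initialize the per-WQE fields that never change after QP creation. */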
static void
mlx5_compress_init_sq(struct mlx5_compress_qp *qp)
{
	volatile struct mlx5_gga_wqe *restrict wqe =
				    (volatile struct mlx5_gga_wqe *)qp->sq.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	const uint32_t sq_ds = rte_cpu_to_be_32((qp->sq.sq->id << 8) | 4u);
	const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					MLX5_COMP_MODE_OFFSET);
	const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->opaque_mr.lkey);
	int i;

	/* All the fields set below stay constant for the QP lifetime. */
	for (i = 0; i < qp->entries_n; ++i, ++wqe) {
		wqe->sq_ds = sq_ds;
		wqe->flags = flags;
		wqe->opaque_lkey = opaq_lkey;
		wqe->opaque_vaddr = rte_cpu_to_be_64
						((uint64_t)(uintptr_t)&opaq[i]);
	}
}

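/*
 * Create a queue pair: allocate the QP structure and the ops ring, register
 * the opaque buffer MR, create the DevX CQ and SQ, and move the SQ to ready.
 */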
static int
mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		       uint32_t max_inflight_ops, int socket_id)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_qp *qp;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
	};
	struct mlx5_devx_create_sq_attr sq_attr = {
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = priv->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(priv->uar),
		},
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	uint32_t log_ops_n = rte_log2_u32(max_inflight_ops);
	uint32_t alloc_size = sizeof(*qp);
	void *opaq_buf;
	int ret;

	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += sizeof(struct rte_comp_op *) * (1u << log_ops_n);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate qp memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	dev->data->queue_pairs[qp_id] = qp;
	opaq_buf = rte_calloc(__func__, 1u << log_ops_n,
			      sizeof(struct mlx5_gga_compress_opaque),
			      sizeof(struct mlx5_gga_compress_opaque));
	if (opaq_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate opaque memory.");
		rte_errno = ENOMEM;
		goto err;
	}
	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
			       priv->dev_config.socket_id)) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto err;
	}
	qp->entries_n = 1 << log_ops_n;
	qp->socket_id = socket_id;
	qp->qp_id = qp_id;
	qp->priv = priv;
	qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
						   RTE_CACHE_LINE_SIZE);
	if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
					sizeof(struct mlx5_gga_compress_opaque),
					     &qp->opaque_mr) != 0) {
		rte_free(opaq_buf);
		DRV_LOG(ERR, "Failed to register opaque MR.");
		rte_errno = ENOMEM;
		goto err;
	}
	ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto err;
	}
	sq_attr.cqn = qp->cq.cq->id;
	ret = mlx5_devx_sq_create(priv->ctx, &qp->sq, log_ops_n, &sq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create SQ.");
		goto err;
	}
	mlx5_compress_init_sq(qp);
	ret = mlx5_devx_cmd_modify_sq(qp->sq.sq, &modify_attr);
	if (ret != 0) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		goto err;
	}
	DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u\n",
		(uint32_t)qp_id, qp->sq.sq->id, qp->cq.cq->id, qp->entries_n);
	return 0;
err:
	mlx5_compress_qp_release(dev, qp_id);
	return -1;
}

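/* Remove a private xform from the per-device list and free its memory. */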
static void
mlx5_compress_xform_free(struct rte_compressdev *dev, void *xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;

	rte_spinlock_lock(&priv->xform_sl);
	LIST_REMOVE((struct mlx5_compress_xform *)xform, next);
	rte_spinlock_unlock(&priv->xform_sl);
	rte_free(xform);
}

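/*
 * Translate an rte_comp_xform into the WQE opcode and GGA control word used
 * by the data path, and add the new xform to the per-device list.
 */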
static int
mlx5_compress_xform_create(struct rte_compressdev *dev,
			   const struct rte_comp_xform *xform,
			   void **private_xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_xform *xfrm;
	uint32_t size;

	if (xform->type == RTE_COMP_COMPRESS && xform->compress.level ==
							  RTE_COMP_LEVEL_NONE) {
		DRV_LOG(ERR, "Non-compressed block is not supported.");
		return -ENOTSUP;
	}
	if ((xform->type == RTE_COMP_COMPRESS && xform->compress.hash_algo !=
	     RTE_COMP_HASH_ALGO_NONE) || (xform->type == RTE_COMP_DECOMPRESS &&
		      xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE)) {
		DRV_LOG(ERR, "SHA is not supported.");
		return -ENOTSUP;
	}
	xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0,
						    priv->dev_config.socket_id);
	if (xfrm == NULL)
		return -ENOMEM;
	xfrm->opcode = MLX5_OPCODE_MMO;
	xfrm->type = xform->type;
	switch (xform->type) {
	case RTE_COMP_COMPRESS:
		switch (xform->compress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			size = 1 << xform->compress.window_size;
			size /= MLX5_GGA_COMP_WIN_SIZE_UNITS;
			xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size),
						 MLX5_COMP_MAX_WIN_SIZE_CONF) <<
						   WQE_GGA_COMP_WIN_SIZE_OFFSET;
			if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
				size = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
			else
				size = priv->min_block_size - 1 +
							  xform->compress.level;
			xfrm->gga_ctrl1 += RTE_MIN(size,
					    MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX) <<
						 WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
			xfrm->opcode += MLX5_OPC_MOD_MMO_COMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			size = xform->compress.deflate.huffman ==
						      RTE_COMP_HUFFMAN_DYNAMIC ?
					    MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX :
					     MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN;
			xfrm->gga_ctrl1 += size <<
					       WQE_GGA_COMP_DYNAMIC_SIZE_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->compress.chksum;
		break;
	case RTE_COMP_DECOMPRESS:
		switch (xform->decompress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DECOMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->decompress.chksum;
		break;
	default:
		DRV_LOG(ERR, "Algorithm %u is not supported.", xform->type);
		goto err;
	}
	DRV_LOG(DEBUG, "New xform: gga ctrl1 = 0x%08X opcode = 0x%08X csum "
		"type = %d.", xfrm->gga_ctrl1, xfrm->opcode, xfrm->csum_type);
	xfrm->gga_ctrl1 = rte_cpu_to_be_32(xfrm->gga_ctrl1);
	rte_spinlock_lock(&priv->xform_sl);
	LIST_INSERT_HEAD(&priv->xform_list, xfrm, next);
	rte_spinlock_unlock(&priv->xform_sl);
	*private_xform = xfrm;
	return 0;
err:
	rte_free(xfrm);
	return -ENOTSUP;
}

static void
mlx5_compress_dev_stop(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
}

static int
mlx5_compress_dev_start(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static void
mlx5_compress_stats_get(struct rte_compressdev *dev,
			struct rte_compressdev_stats *stats)
{
	uint16_t qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_compress_stats_reset(struct rte_compressdev *dev)
{
	uint16_t qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_compressdev_ops mlx5_compress_ops = {
	.dev_configure = mlx5_compress_dev_configure,
	.dev_start = mlx5_compress_dev_start,
	.dev_stop = mlx5_compress_dev_stop,
	.dev_close = mlx5_compress_dev_close,
	.dev_infos_get = mlx5_compress_dev_info_get,
	.stats_get = mlx5_compress_stats_get,
	.stats_reset = mlx5_compress_stats_reset,
	.queue_pair_setup = mlx5_compress_qp_setup,
	.queue_pair_release = mlx5_compress_qp_release,
	.private_xform_create = mlx5_compress_xform_create,
	.private_xform_free = mlx5_compress_xform_free,
	.stream_create = NULL,
	.stream_free = NULL,
};

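/*
 * Fill a WQE data segment from an mbuf: byte count, lkey resolved through the
 * MR cache, and buffer address. Returns the resolved lkey; UINT32_MAX means
 * the MR lookup failed.
 */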
static __rte_always_inline uint32_t
mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
		       volatile struct mlx5_wqe_dseg *restrict dseg,
		       struct rte_mbuf *restrict mbuf,
		       uint32_t offset, uint32_t len)
{
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	dseg->bcount = rte_cpu_to_be_32(len);
	dseg->lkey = mlx5_mr_addr2mr_bh(qp->priv->pd, 0, &qp->priv->mr_scache,
					&qp->mr_ctrl, addr,
					!!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
	dseg->pbuf = rte_cpu_to_be_64(addr);
	return dseg->lkey;
}

/*
 * Provide a safe 64-bit store operation to the mlx5 UAR region for both
 * 32-bit and 64-bit architectures.
 */
static __rte_always_inline void
mlx5_compress_uar_write(uint64_t val, struct mlx5_compress_priv *priv)
{
#ifdef RTE_ARCH_64
	*priv->uar_addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(&priv->uar32_sl);
	*(volatile uint32_t *)priv->uar_addr = val;
	rte_io_wmb();
	*((volatile uint32_t *)priv->uar_addr + 1) = val >> 32;
	rte_spinlock_unlock(&priv->uar32_sl);
#endif /* !RTE_ARCH_64 */
}

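/*
 * Enqueue burst: build one GGA WQE per operation, validate arguments, then
 * update the SQ doorbell record and ring the UAR doorbell with the last WQE.
 */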
static uint16_t
mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
							      qp->sq.wqes, *wqe;
	struct mlx5_compress_xform *xform;
	struct rte_comp_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint16_t idx;
	bool invalid;

	nb_ops = RTE_MIN(remain, nb_ops);
	remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		wqe = &wqes[idx];
		rte_prefetch0(&wqes[(qp->pi + 1) & mask]);
		op = *ops++;
		xform = op->private_xform;
		/*
		 * Check operation arguments and error cases:
		 *   - Operation type must be state-less.
		 *   - Compress operation flush flag must be FULL or FINAL.
		 *   - Source and destination buffers must be mapped internally.
		 */
		invalid = op->op_type != RTE_COMP_OP_STATELESS ||
			  (xform->type == RTE_COMP_COMPRESS &&
			   op->flush_flag < RTE_COMP_FLUSH_FULL);
		if (unlikely(invalid ||
			     mlx5_compress_dseg_set(qp, &wqe->gather, op->m_src,
				op->src.offset, op->src.length) == UINT32_MAX ||
			     mlx5_compress_dseg_set(qp, &wqe->scatter, op->m_dst,
				op->dst.offset,
				rte_pktmbuf_pkt_len(op->m_dst) -
					       op->dst.offset) == UINT32_MAX)) {
			op->status = invalid ? RTE_COMP_OP_STATUS_INVALID_ARGS :
					       RTE_COMP_OP_STATUS_ERROR;
			nb_ops -= remain;
			if (unlikely(nb_ops == 0))
				return 0;
			break;
		}
		wqe->gga_ctrl1 = xform->gga_ctrl1;
		wqe->opcode = rte_cpu_to_be_32(xform->opcode + (qp->pi << 8));
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	rte_io_wmb();
	qp->sq.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
	rte_wmb();
	mlx5_compress_uar_write(*(volatile uint64_t *)wqe, qp->priv);
	rte_wmb();
	return nb_ops;
}

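/* Dump the CQE, WQE and opaque buffer of a failed operation for debugging. */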
static void
mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe,
			    volatile uint32_t *opaq)
{
	size_t i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < sizeof(struct mlx5_gga_wqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
	DRV_LOG(ERR, "\nError opaq:");
	for (i = 0; i < sizeof(struct mlx5_gga_compress_opaque) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", opaq[i], opaq[i + 1],
			opaq[i + 2], opaq[i + 3]);
}

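/*
 * Handle an error CQE: mark the operation as failed, combine the opaque and
 * CQE syndromes into debug_status and dump the error objects.
 */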
static void
mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,
			     struct rte_comp_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							      &qp->cq.cqes[idx];
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
								    qp->sq.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;

	op->status = RTE_COMP_OP_STATUS_ERROR;
	op->consumed = 0;
	op->produced = 0;
	op->output_chksum = 0;
	op->debug_status = rte_be_to_cpu_32(opaq[idx].syndrom) |
			      ((uint64_t)rte_be_to_cpu_32(cqe->syndrome) << 32);
	mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
				    (volatile uint32_t *)&wqes[idx],
				    (volatile uint32_t *)&opaq[idx]);
	qp->stats.dequeue_err_count++;
}

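/*
 * Dequeue burst: poll the CQ, complete the matching operations, extract the
 * produced length and checksum from the opaque buffer, and update the CQ
 * doorbell record.
 */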
static uint16_t
mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_compress_xform *restrict xform;
	volatile struct mlx5_cqe *restrict cqe;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	struct rte_comp_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		rte_prefetch0(&qp->cq.cqes[next_idx]);
		rte_prefetch0(qp->ops[next_idx]);
		op = qp->ops[idx];
		cqe = &qp->cq.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_compress_cqe_err_handle(qp, op);
		} else {
			xform = op->private_xform;
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
			op->consumed = op->src.length;
			op->produced = rte_be_to_cpu_32(cqe->byte_cnt);
			MLX5_ASSERT(cqe->byte_cnt ==
				    opaq[idx].scattered_length);
			switch (xform->csum_type) {
			case RTE_COMP_CHECKSUM_CRC32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32);
				break;
			case RTE_COMP_CHECKSUM_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32;
				break;
			case RTE_COMP_CHECKSUM_CRC32_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32) |
						    ((uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32);
				break;
			default:
				break;
			}
		}
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

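/* Find the IB device whose PCI address matches the probed PCI device. */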
static struct ibv_device *
mlx5_compress_get_ib_device_match(struct rte_pci_addr *addr)
{
	int n;
	struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
	struct ibv_device *ibv_match = NULL;

	if (ibv_list == NULL) {
		rte_errno = ENOSYS;
		return NULL;
	}
	while (n-- > 0) {
		struct rte_pci_addr paddr;

		DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
		if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &paddr) != 0)
			continue;
		if (rte_pci_addr_cmp(addr, &paddr) != 0)
			continue;
		ibv_match = ibv_list[n];
		break;
	}
	if (ibv_match == NULL)
		rte_errno = ENOENT;
	mlx5_glue->free_device_list(ibv_list);
	return ibv_match;
}

static void
mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
{
	if (priv->pd != NULL) {
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		priv->pd = NULL;
	}
	if (priv->uar != NULL) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
}

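/* Allocate a Protection Domain and query its PD number via Direct Verbs. */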
static int
mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret;

	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
	if (priv->pd == NULL) {
		DRV_LOG(ERR, "Failed to allocate PD.");
		return errno ? -errno : -ENOMEM;
	}
	obj.pd.in = priv->pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get PD object info.");
		mlx5_glue->dealloc_pd(priv->pd);
		priv->pd = NULL;
		return -errno;
	}
	priv->pdn = pd_info.pdn;
	return 0;
#else
	(void)priv;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

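/* Prepare the global HW resources shared by all queue pairs: PD and UAR. */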
static int
mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
{
	if (mlx5_compress_pd_create(priv) != 0)
		return -1;
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
	    NULL) {
		rte_errno = errno;
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
	MLX5_ASSERT(priv->uar_addr);
#ifndef RTE_ARCH_64
	rte_spinlock_init(&priv->uar32_sl);
#endif /* RTE_ARCH_64 */
	return 0;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns a compress device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_compress_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_compress_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	struct ibv_device *ibv;
	struct rte_compressdev *cdev;
	struct ibv_context *ctx;
	struct mlx5_compress_priv *priv;
	struct mlx5_hca_attr att = { 0 };
	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = pci_dev->device.numa_node,
	};

	RTE_SET_USED(pci_drv);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ibv = mlx5_compress_get_ib_device_match(&pci_dev->addr);
	if (ibv == NULL) {
		DRV_LOG(ERR, "No matching IB device for PCI slot "
			PCI_PRI_FMT ".", pci_dev->addr.domain,
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
		return -rte_errno;
	}
	DRV_LOG(INFO, "PCI information matches for device \"%s\".", ibv->name);
	ctx = mlx5_glue->dv_open_device(ibv);
	if (ctx == NULL) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
	    att.mmo_compress_en == 0 || att.mmo_decompress_en == 0 ||
	    att.mmo_dma_en == 0) {
		DRV_LOG(ERR, "Not enough capabilities to support compress "
			"operations, maybe old FW/OFED version?");
		claim_zero(mlx5_glue->close_device(ctx));
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	cdev = rte_compressdev_pmd_create(ibv->name, &pci_dev->device,
					  sizeof(*priv), &init_params);
	if (cdev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
		claim_zero(mlx5_glue->close_device(ctx));
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Compress device %s was created successfully.", ibv->name);
	cdev->dev_ops = &mlx5_compress_ops;
	cdev->dequeue_burst = mlx5_compress_dequeue_burst;
	cdev->enqueue_burst = mlx5_compress_enqueue_burst;
	cdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	priv = cdev->data->dev_private;
	priv->ctx = ctx;
	priv->pci_dev = pci_dev;
	priv->cdev = cdev;
	priv->min_block_size = att.compress_min_block_size;
	if (mlx5_compress_hw_global_prepare(priv) != 0) {
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		return -1;
	}
	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
			     MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all compress devices belonging to a given PCI device.
 *
 * @param[in] pdev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_compress_pci_remove(struct rte_pci_device *pdev)
{
	struct mlx5_compress_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
		if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) == 0)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		mlx5_mr_release_cache(&priv->mr_scache);
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
	}
	return 0;
}

static const struct rte_pci_id mlx5_compress_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_pci_driver mlx5_compress_driver = {
	.driver_class = MLX5_CLASS_COMPRESS,
	.pci_driver = {
		.driver = {
			.name = RTE_STR(MLX5_COMPRESS_DRIVER_NAME),
		},
		.id_table = mlx5_compress_pci_id_map,
		.probe = mlx5_compress_pci_probe,
		.remove = mlx5_compress_pci_remove,
	},
};

RTE_INIT(rte_mlx5_compress_init)
{
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_pci_driver_register(&mlx5_compress_driver);
}

RTE_LOG_REGISTER(mlx5_compress_logtype, MLX5_COMPRESS_LOG_NAME, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_COMPRESS_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_COMPRESS_DRIVER_NAME, mlx5_compress_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_COMPRESS_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");