/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 */
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_bus_pci.h>
#include <rte_spinlock.h>
#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_compressdev_pmd.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_common_mr.h>
#include <mlx5_prm.h>

#include "mlx5_compress_utils.h"
#define MLX5_COMPRESS_DRIVER_NAME mlx5_compress
#define MLX5_COMPRESS_MAX_QPS 1024
#define MLX5_COMP_MAX_WIN_SIZE_CONF 6u
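
/*
 * Private xform context: the parameters negotiated at private_xform_create()
 * time are pre-encoded into the WQE opcode and the big-endian GGA control
 * word, so the enqueue fast path only has to copy them into the WQE.
 */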
struct mlx5_compress_xform {
	LIST_ENTRY(mlx5_compress_xform) next;
	enum rte_comp_xform_type type;
	enum rte_comp_checksum_type csum_type;
	uint32_t opcode;
	uint32_t gga_ctrl1; /* BE. */
};
struct mlx5_compress_priv {
	TAILQ_ENTRY(mlx5_compress_priv) next;
	struct ibv_context *ctx; /* Device context. */
	struct rte_compressdev *compressdev;
	void *uar;
	struct ibv_pd *pd;
	uint32_t pdn; /* Protection Domain number. */
	/* Minimum huffman block size supported by the device. */
	uint8_t min_block_size;
	uint8_t qp_ts_format; /* Whether QP supports timestamp formats. */
	struct rte_compressdev_config dev_config;
	LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
	rte_spinlock_t xform_sl;
	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
	volatile uint64_t *uar_addr;
	/* HCA capabilities: MMO support via SQ and/or QP. */
	uint32_t mmo_decomp_sq:1;
	uint32_t mmo_decomp_qp:1;
	uint32_t mmo_comp_sq:1;
	uint32_t mmo_comp_qp:1;
	uint32_t mmo_dma_sq:1;
	uint32_t mmo_dma_qp:1;
#ifndef RTE_ARCH_64
	rte_spinlock_t uar32_sl;
#endif /* RTE_ARCH_64 */
};
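
/*
 * Per-queue-pair context: DevX CQ and QP, the registered opaque result
 * buffer, a shadow ring of in-flight operations and a per-queue MR cache.
 */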
struct mlx5_compress_qp {
	uint16_t qp_id;
	uint16_t entries_n;
	uint16_t pi;
	uint16_t ci;
	struct mlx5_mr_ctrl mr_ctrl;
	int socket_id;
	struct mlx5_devx_cq cq;
	struct mlx5_devx_qp qp;
	struct mlx5_pmd_mr opaque_mr;
	struct rte_comp_op **ops;
	struct mlx5_compress_priv *priv;
	struct rte_compressdev_stats stats;
};

TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

int mlx5_compress_logtype;

static const struct rte_compressdev_capabilities mlx5_caps[] = {
	{
		.algo = RTE_COMP_ALGO_NULL,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
	},
	{
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				      RTE_COMP_FF_HUFFMAN_FIXED |
				      RTE_COMP_FF_HUFFMAN_DYNAMIC,
		.window_size = {.min = 10, .max = 15, .increment = 1},
	},
	{
		.algo = RTE_COMP_ALGO_LIST_END,
	},
};

static void
mlx5_compress_dev_info_get(struct rte_compressdev *dev,
			   struct rte_compressdev_info *info)
{
	RTE_SET_USED(dev);
	if (info != NULL) {
		info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS;
		info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
		info->capabilities = mlx5_caps;
	}
}

static int
mlx5_compress_dev_configure(struct rte_compressdev *dev,
			    struct rte_compressdev_config *config)
{
	struct mlx5_compress_priv *priv;

	if (dev == NULL || config == NULL)
		return -EINVAL;
	priv = dev->data->dev_private;
	priv->dev_config = *config;
	return 0;
}

static int
mlx5_compress_dev_close(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_compress_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp->qp.qp != NULL)
		mlx5_devx_qp_destroy(&qp->qp);
	if (qp->cq.cq != NULL)
		mlx5_devx_cq_destroy(&qp->cq);
	if (qp->opaque_mr.obj != NULL) {
		void *opaq = qp->opaque_mr.addr;

		mlx5_common_verbs_dereg_mr(&qp->opaque_mr);
		if (opaq != NULL)
			rte_free(opaq);
	}
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}
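
/*
 * Pre-program the constant part of every GGA WQE once at setup time:
 * data-segment count, completion mode and the per-entry opaque buffer
 * address/lkey. The enqueue path then only fills the variable fields.
 */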
static void
mlx5_compress_init_qp(struct mlx5_compress_qp *qp)
{
	volatile struct mlx5_gga_wqe *restrict wqe =
				    (volatile struct mlx5_gga_wqe *)qp->qp.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	const uint32_t sq_ds = rte_cpu_to_be_32((qp->qp.qp->id << 8) | 4u);
	const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					MLX5_COMP_MODE_OFFSET);
	const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->opaque_mr.lkey);
	int i;

	/* All the following WQE fields stay constant across operations. */
	for (i = 0; i < qp->entries_n; ++i, ++wqe) {
		wqe->sq_ds = sq_ds;
		wqe->flags = flags;
		wqe->opaque_lkey = opaq_lkey;
		wqe->opaque_vaddr = rte_cpu_to_be_64
					       ((uint64_t)(uintptr_t)&opaq[i]);
	}
}
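
/*
 * Queue pair setup: allocate the QP context and operation ring, create the
 * per-queue MR btree, register the opaque result buffer, then create the
 * DevX CQ and QP and move the QP to the RTS state.
 */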
static int
mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		       uint32_t max_inflight_ops, int socket_id)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_qp *qp;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
	};
	struct mlx5_devx_qp_attr qp_attr = {
		.pd = priv->pdn,
		.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar),
	};
	uint32_t log_ops_n = rte_log2_u32(max_inflight_ops);
	uint32_t alloc_size = sizeof(*qp);
	void *opaq_buf;
	int ret;

	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += sizeof(struct rte_comp_op *) * (1u << log_ops_n);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate qp memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	dev->data->queue_pairs[qp_id] = qp;
	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
			       priv->dev_config.socket_id)) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto err;
	}
	opaq_buf = rte_calloc(__func__, (size_t)1 << log_ops_n,
			      sizeof(struct mlx5_gga_compress_opaque),
			      sizeof(struct mlx5_gga_compress_opaque));
	if (opaq_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate opaque memory.");
		rte_errno = ENOMEM;
		goto err;
	}
	qp->entries_n = 1 << log_ops_n;
	qp->socket_id = socket_id;
	qp->qp_id = qp_id;
	qp->priv = priv;
	qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
						   RTE_CACHE_LINE_SIZE);
	if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
				     sizeof(struct mlx5_gga_compress_opaque),
				     &qp->opaque_mr) != 0) {
		rte_free(opaq_buf);
		DRV_LOG(ERR, "Failed to register opaque MR.");
		rte_errno = ENOMEM;
		goto err;
	}
	ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto err;
	}
	qp_attr.cqn = qp->cq.cq->id;
	qp_attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
	qp_attr.sq_size = RTE_BIT32(log_ops_n);
	qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp &&
		      priv->mmo_dma_qp;
	ret = mlx5_devx_qp_create(priv->ctx, &qp->qp, log_ops_n, &qp_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create QP.");
		goto err;
	}
	mlx5_compress_init_qp(qp);
	ret = mlx5_devx_qp2rts(&qp->qp, 0);
	if (ret)
		goto err;
	/* Save pointer of global generation number to check memory event. */
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u",
		(uint32_t)qp_id, qp->qp.qp->id, qp->cq.cq->id, qp->entries_n);
	return 0;
err:
	mlx5_compress_qp_release(dev, qp_id);
	return -1;
}

static int
mlx5_compress_xform_free(struct rte_compressdev *dev, void *xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;

	rte_spinlock_lock(&priv->xform_sl);
	LIST_REMOVE((struct mlx5_compress_xform *)xform, next);
	rte_spinlock_unlock(&priv->xform_sl);
	rte_free(xform);
	return 0;
}
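
/*
 * Build a private xform: validate the requested parameters and encode the
 * window size, block size, Huffman dynamic-block size and checksum type
 * into the WQE opcode and GGA control word used by the enqueue path.
 */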
static int
mlx5_compress_xform_create(struct rte_compressdev *dev,
			   const struct rte_comp_xform *xform,
			   void **private_xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_xform *xfrm;
	uint32_t size;

	if (xform->type == RTE_COMP_COMPRESS && xform->compress.level ==
							  RTE_COMP_LEVEL_NONE) {
		DRV_LOG(ERR, "Non-compressed block is not supported.");
		return -ENOTSUP;
	}
	if ((xform->type == RTE_COMP_COMPRESS && xform->compress.hash_algo !=
	     RTE_COMP_HASH_ALGO_NONE) || (xform->type == RTE_COMP_DECOMPRESS &&
		      xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE)) {
		DRV_LOG(ERR, "SHA is not supported.");
		return -ENOTSUP;
	}
	xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0,
				  priv->dev_config.socket_id);
	if (xfrm == NULL)
		return -ENOMEM;
	xfrm->opcode = MLX5_OPCODE_MMO;
	xfrm->type = xform->type;
	switch (xform->type) {
	case RTE_COMP_COMPRESS:
		switch (xform->compress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			size = 1 << xform->compress.window_size;
			size /= MLX5_GGA_COMP_WIN_SIZE_UNITS;
			xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size),
					   MLX5_COMP_MAX_WIN_SIZE_CONF) <<
						   WQE_GGA_COMP_WIN_SIZE_OFFSET;
			switch (xform->compress.level) {
			case RTE_COMP_LEVEL_PMD_DEFAULT:
				size = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
				break;
			case RTE_COMP_LEVEL_MAX:
				size = priv->min_block_size;
				break;
			default:
				size = RTE_MAX(MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX
					       + 1 - xform->compress.level,
					       priv->min_block_size);
			}
			xfrm->gga_ctrl1 += RTE_MIN(size,
					   MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX) <<
						 WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
			xfrm->opcode += MLX5_OPC_MOD_MMO_COMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			size = xform->compress.deflate.huffman ==
						      RTE_COMP_HUFFMAN_DYNAMIC ?
					    MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX :
					    MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN;
			xfrm->gga_ctrl1 += size <<
					      WQE_GGA_COMP_DYNAMIC_SIZE_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->compress.chksum;
		break;
	case RTE_COMP_DECOMPRESS:
		switch (xform->decompress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DECOMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->decompress.chksum;
		break;
	default:
		DRV_LOG(ERR, "Algorithm %u is not supported.", xform->type);
		goto err;
	}
	DRV_LOG(DEBUG, "New xform: gga ctrl1 = 0x%08X opcode = 0x%08X csum "
		"type = %d.", xfrm->gga_ctrl1, xfrm->opcode, xfrm->csum_type);
	xfrm->gga_ctrl1 = rte_cpu_to_be_32(xfrm->gga_ctrl1);
	rte_spinlock_lock(&priv->xform_sl);
	LIST_INSERT_HEAD(&priv->xform_list, xfrm, next);
	rte_spinlock_unlock(&priv->xform_sl);
	*private_xform = xfrm;
	return 0;
err:
	rte_free(xfrm);
	return -ENOTSUP;
}

static void
mlx5_compress_dev_stop(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
}

static int
mlx5_compress_dev_start(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static void
mlx5_compress_stats_get(struct rte_compressdev *dev,
			struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_compress_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_compressdev_ops mlx5_compress_ops = {
	.dev_configure = mlx5_compress_dev_configure,
	.dev_start = mlx5_compress_dev_start,
	.dev_stop = mlx5_compress_dev_stop,
	.dev_close = mlx5_compress_dev_close,
	.dev_infos_get = mlx5_compress_dev_info_get,
	.stats_get = mlx5_compress_stats_get,
	.stats_reset = mlx5_compress_stats_reset,
	.queue_pair_setup = mlx5_compress_qp_setup,
	.queue_pair_release = mlx5_compress_qp_release,
	.private_xform_create = mlx5_compress_xform_create,
	.private_xform_free = mlx5_compress_xform_free,
	.stream_create = NULL,
	.stream_free = NULL,
};

/**
 * Query LKey from a packet buffer for QP. If not found, add the mempool.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param addr
 *   Search key (buffer address).
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param ol_flags
 *   Mbuf offload features.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_compress_addr2mr(struct mlx5_compress_priv *priv, uintptr_t addr,
		      struct mlx5_mr_ctrl *mr_ctrl, uint64_t ol_flags)
{
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half on miss. */
	return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
				  !!(ol_flags & EXT_ATTACHED_MBUF));
}
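
/*
 * Fill one WQE data segment (byte count, lkey, address) from an mbuf
 * fragment. Returns the resolved lkey, or UINT32_MAX if the address could
 * not be translated, which the caller treats as an enqueue error.
 */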
static __rte_always_inline uint32_t
mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
		       volatile struct mlx5_wqe_dseg *restrict dseg,
		       struct rte_mbuf *restrict mbuf,
		       uint32_t offset, uint32_t len)
{
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	dseg->bcount = rte_cpu_to_be_32(len);
	dseg->lkey = mlx5_compress_addr2mr(qp->priv, addr, &qp->mr_ctrl,
					   mbuf->ol_flags);
	dseg->pbuf = rte_cpu_to_be_64(addr);
	return dseg->lkey;
}

/*
 * Provide a safe 64-bit store to the mlx5 UAR region for both 32-bit and
 * 64-bit architectures.
 */
static __rte_always_inline void
mlx5_compress_uar_write(uint64_t val, struct mlx5_compress_priv *priv)
{
#ifdef RTE_ARCH_64
	*priv->uar_addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(&priv->uar32_sl);
	*(volatile uint32_t *)priv->uar_addr = val;
	rte_io_wmb();
	*((volatile uint32_t *)priv->uar_addr + 1) = val >> 32;
	rte_spinlock_unlock(&priv->uar32_sl);
#endif
}
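
/*
 * Enqueue burst: for each stateless operation, fill the gather/scatter data
 * segments and the xform-specific opcode/control word of the next GGA WQE,
 * then ring the doorbell once by updating the send doorbell record and
 * writing the last WQE control segment to the UAR register.
 */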
static uint16_t
mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
							      qp->qp.wqes, *wqe;
	struct mlx5_compress_xform *xform;
	struct rte_comp_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint16_t idx;
	bool invalid;

	remain = nb_ops = RTE_MIN(remain, nb_ops);
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		wqe = &wqes[idx];
		rte_prefetch0(&wqes[(qp->pi + 1) & mask]);
		op = *ops++;
		xform = op->private_xform;
		/*
		 * Check operation arguments and error cases:
		 *   - Operation type must be state-less.
		 *   - Compress operation flush flag must be FULL or FINAL.
		 *   - Source and destination buffers must be mapped internally.
		 */
		invalid = op->op_type != RTE_COMP_OP_STATELESS ||
			  (xform->type == RTE_COMP_COMPRESS &&
			   op->flush_flag < RTE_COMP_FLUSH_FULL);
		if (unlikely(invalid ||
			     (mlx5_compress_dseg_set(qp, &wqe->gather,
				op->m_src, op->src.offset,
				op->src.length) == UINT32_MAX) ||
			     (mlx5_compress_dseg_set(qp, &wqe->scatter,
				op->m_dst, op->dst.offset,
				rte_pktmbuf_pkt_len(op->m_dst) -
				op->dst.offset) == UINT32_MAX))) {
			op->status = invalid ? RTE_COMP_OP_STATUS_INVALID_ARGS :
					       RTE_COMP_OP_STATUS_ERROR;
			nb_ops -= remain;
			if (unlikely(nb_ops == 0))
				return 0;
			break;
		}
		wqe->gga_ctrl1 = xform->gga_ctrl1;
		wqe->opcode = rte_cpu_to_be_32(xform->opcode + (qp->pi << 8));
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	rte_io_wmb();
	qp->qp.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
	rte_wmb();
	mlx5_compress_uar_write(*(volatile uint64_t *)wqe, qp->priv);
	rte_wmb();
	return nb_ops;
}
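
/* Dump the CQE, WQE and opaque buffer of a failed operation for debugging. */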
static void
mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe,
			    volatile uint32_t *opaq)
{
	size_t i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < sizeof(struct mlx5_gga_wqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
	DRV_LOG(ERR, "\nError opaq:");
	for (i = 0; i < sizeof(struct mlx5_gga_compress_opaque) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", opaq[i], opaq[i + 1],
			opaq[i + 2], opaq[i + 3]);
}

static void
mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,
			     struct rte_comp_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							      &qp->cq.cqes[idx];
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
								    qp->qp.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;

	op->status = RTE_COMP_OP_STATUS_ERROR;
	op->consumed = 0;
	op->produced = 0;
	op->output_chksum = 0;
	op->debug_status = rte_be_to_cpu_32(opaq[idx].syndrom) |
			   ((uint64_t)rte_be_to_cpu_32(cqe->syndrome) << 32);
	mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
				    (volatile uint32_t *)&wqes[idx],
				    (volatile uint32_t *)&opaq[idx]);
	qp->stats.dequeue_err_count++;
}
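
/*
 * Dequeue burst: poll completions by CQE ownership, report the produced
 * byte count and checksums from the opaque buffer, and update the CQ
 * doorbell record once for all dequeued operations.
 */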
static uint16_t
mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_compress_xform *restrict xform;
	volatile struct mlx5_cqe *restrict cqe;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	struct rte_comp_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		rte_prefetch0(&qp->cq.cqes[next_idx]);
		rte_prefetch0(qp->ops[next_idx]);
		op = qp->ops[idx];
		cqe = &qp->cq.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_compress_cqe_err_handle(qp, op);
		} else {
			xform = op->private_xform;
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
			op->consumed = op->src.length;
			op->produced = rte_be_to_cpu_32(cqe->byte_cnt);
			MLX5_ASSERT(cqe->byte_cnt ==
				    opaq[idx].scattered_length);
			switch (xform->csum_type) {
			case RTE_COMP_CHECKSUM_CRC32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
							     (opaq[idx].crc32);
				break;
			case RTE_COMP_CHECKSUM_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32;
				break;
			case RTE_COMP_CHECKSUM_CRC32_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
							     (opaq[idx].crc32) |
						    ((uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32);
				break;
			default:
				break;
			}
		}
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

static void
mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
{
	if (priv->pd != NULL) {
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		priv->pd = NULL;
	}
	if (priv->uar != NULL) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
}

static int
mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret;

	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
	if (priv->pd == NULL) {
		DRV_LOG(ERR, "Failed to allocate PD.");
		return errno ? -errno : -ENOMEM;
	}
	obj.pd.in = priv->pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get PD object info.");
		mlx5_glue->dealloc_pd(priv->pd);
		priv->pd = NULL;
		return -errno;
	}
	priv->pdn = pd_info.pdn;
	return 0;
#else
	(void)priv;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

static int
mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
{
	if (mlx5_compress_pd_create(priv) != 0)
		return -1;
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (priv->uar == NULL ||
	    mlx5_os_get_devx_uar_reg_addr(priv->uar) == NULL) {
		rte_errno = errno;
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
	MLX5_ASSERT(priv->uar_addr);
#ifndef RTE_ARCH_64
	rte_spinlock_init(&priv->uar32_sl);
#endif /* RTE_ARCH_64 */
	return 0;
}

/**
 * Callback for memory event.
 */
static void
mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
			      size_t len, void *arg __rte_unused)
{
	struct mlx5_compress_priv *priv;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&priv_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
			mlx5_free_mr_by_addr(&priv->mr_scache,
					     priv->ctx->device->name,
					     addr, len);
		pthread_mutex_unlock(&priv_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}
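
/*
 * Probe: open the DevX device, verify that the HCA exposes the compress,
 * decompress and DMA MMO capabilities (via SQ or via QP), then create the
 * compressdev instance, the global PD/UAR and the shared MR cache.
 */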
static int
mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
{
	struct ibv_device *ibv;
	struct rte_compressdev *compressdev;
	struct ibv_context *ctx;
	struct mlx5_compress_priv *priv;
	struct mlx5_hca_attr att = { 0 };
	struct rte_compressdev_pmd_init_params init_params = {
		.socket_id = cdev->dev->numa_node,
	};

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ibv = mlx5_os_get_ibv_dev(cdev->dev);
	if (ibv == NULL)
		return -rte_errno;
	ctx = mlx5_glue->dv_open_device(ibv);
	if (ctx == NULL) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
	    ((att.mmo_compress_sq_en == 0 || att.mmo_decompress_sq_en == 0 ||
	    att.mmo_dma_sq_en == 0) && (att.mmo_compress_qp_en == 0 ||
	    att.mmo_decompress_qp_en == 0 || att.mmo_dma_qp_en == 0))) {
		DRV_LOG(ERR, "Not enough capabilities to support compress "
			"operations, maybe old FW/OFED version?");
		claim_zero(mlx5_glue->close_device(ctx));
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	compressdev = rte_compressdev_pmd_create(ibv->name, cdev->dev,
						 sizeof(*priv), &init_params);
	if (compressdev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
		claim_zero(mlx5_glue->close_device(ctx));
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Compress device %s was created successfully.", ibv->name);
	compressdev->dev_ops = &mlx5_compress_ops;
	compressdev->dequeue_burst = mlx5_compress_dequeue_burst;
	compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	priv = compressdev->data->dev_private;
	priv->mmo_decomp_sq = att.mmo_decompress_sq_en;
	priv->mmo_decomp_qp = att.mmo_decompress_qp_en;
	priv->mmo_comp_sq = att.mmo_compress_sq_en;
	priv->mmo_comp_qp = att.mmo_compress_qp_en;
	priv->mmo_dma_sq = att.mmo_dma_sq_en;
	priv->mmo_dma_qp = att.mmo_dma_qp_en;
	priv->ctx = ctx;
	priv->compressdev = compressdev;
	priv->min_block_size = att.compress_min_block_size;
	priv->qp_ts_format = att.qp_ts_format;
	if (mlx5_compress_hw_global_prepare(priv) != 0) {
		rte_compressdev_pmd_destroy(priv->compressdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		return -1;
	}
	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
			       MLX5_MR_BTREE_CACHE_N * 2,
			       rte_socket_id()) != 0) {
		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->compressdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&mlx5_compress_priv_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_compress_mr_mem_event_cb,
						NULL);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

static int
mlx5_compress_dev_remove(struct mlx5_common_device *cdev)
{
	struct mlx5_compress_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
		if (priv->compressdev->device == cdev->dev)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		if (TAILQ_EMPTY(&mlx5_compress_priv_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_mr_release_cache(&priv->mr_scache);
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->compressdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
	}
	return 0;
}

static const struct rte_pci_id mlx5_compress_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_class_driver mlx5_compress_driver = {
	.drv_class = MLX5_CLASS_COMPRESS,
	.name = RTE_STR(MLX5_COMPRESS_DRIVER_NAME),
	.id_table = mlx5_compress_pci_id_map,
	.probe = mlx5_compress_dev_probe,
	.remove = mlx5_compress_dev_remove,
};

RTE_INIT(rte_mlx5_compress_init)
{
	if (mlx5_glue != NULL)
		mlx5_class_driver_register(&mlx5_compress_driver);
}

RTE_LOG_REGISTER_DEFAULT(mlx5_compress_logtype, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_COMPRESS_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_COMPRESS_DRIVER_NAME, mlx5_compress_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_COMPRESS_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");