+static const struct rte_compressdev_capabilities mlx5_caps[] = {
+ {
+ .algo = RTE_COMP_ALGO_NULL,
+ .comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
+ },
+ {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ .window_size = {.min = 10, .max = 15, .increment = 1},
+ },
+ {
+ .algo = RTE_COMP_ALGO_LIST_END,
+ }
+};
+
+static void
+mlx5_compress_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ RTE_SET_USED(dev);
+ if (info != NULL) {
+ info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS;
+ info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+ info->capabilities = mlx5_caps;
+ }
+}
+
+static int
+mlx5_compress_dev_configure(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct mlx5_compress_priv *priv;
+
+ if (dev == NULL || config == NULL)
+ return -EINVAL;
+ priv = dev->data->dev_private;
+ priv->dev_config = *config;
+ return 0;
+}
+
+static int
+mlx5_compress_dev_close(struct rte_compressdev *dev)
+{
+ RTE_SET_USED(dev);
+ return 0;
+}
+
+static int
+mlx5_compress_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp->sq.sq != NULL)
+ mlx5_devx_sq_destroy(&qp->sq);
+ if (qp->cq.cq != NULL)
+ mlx5_devx_cq_destroy(&qp->cq);
+ if (qp->opaque_mr.obj != NULL) {
+ void *opaq = qp->opaque_mr.addr;
+
+ mlx5_common_verbs_dereg_mr(&qp->opaque_mr);
+ if (opaq != NULL)
+ rte_free(opaq);
+ }
+ mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ return 0;
+}
+
+static void
+mlx5_compress_init_sq(struct mlx5_compress_qp *qp)
+{
+ volatile struct mlx5_gga_wqe *restrict wqe =
+ (volatile struct mlx5_gga_wqe *)qp->sq.wqes;
+ volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
+ const uint32_t sq_ds = rte_cpu_to_be_32((qp->sq.sq->id << 8) | 4u);
+ const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+ MLX5_COMP_MODE_OFFSET);
+ const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->opaque_mr.lkey);
+ int i;
+
+ /* All the following WQE fields stay constant for the SQ lifetime; set them once here. */
+ for (i = 0; i < qp->entries_n; ++i, ++wqe) {
+ wqe->sq_ds = sq_ds;
+ wqe->flags = flags;
+ wqe->opaque_lkey = opaq_lkey;
+ wqe->opaque_vaddr = rte_cpu_to_be_64
+ ((uint64_t)(uintptr_t)&opaq[i]);
+ }
+}
+
+static int
+mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct mlx5_compress_priv *priv = dev->data->dev_private;
+ struct mlx5_compress_qp *qp;
+ struct mlx5_devx_cq_attr cq_attr = {
+ .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
+ };
+ struct mlx5_devx_create_sq_attr sq_attr = {
+ .user_index = qp_id,
+ .wq_attr = (struct mlx5_devx_wq_attr){
+ .pd = priv->pdn,
+ .uar_page = mlx5_os_get_devx_uar_page_id(priv->uar),
+ },
+ };
+ struct mlx5_devx_modify_sq_attr modify_attr = {
+ .state = MLX5_SQC_STATE_RDY,
+ };
+ uint32_t log_ops_n = rte_log2_u32(max_inflight_ops);
+ uint32_t alloc_size = sizeof(*qp);
+ void *opaq_buf;
+ int ret;
+
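+ /* Reserve room for the in-flight ops array right after the QP struct, cache-line aligned. */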
+ alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
+ alloc_size += sizeof(struct rte_comp_op *) * (1u << log_ops_n);
+ qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp == NULL) {
+ DRV_LOG(ERR, "Failed to allocate qp memory.");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ dev->data->queue_pairs[qp_id] = qp;
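+ /* One GGA opaque metadata struct per SQ entry; registered below as an MR for HW access. */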
+ opaq_buf = rte_calloc(__func__, 1u << log_ops_n,
+ sizeof(struct mlx5_gga_compress_opaque),
+ sizeof(struct mlx5_gga_compress_opaque));
+ if (opaq_buf == NULL) {
+ DRV_LOG(ERR, "Failed to allocate opaque memory.");
+ rte_errno = ENOMEM;
+ goto err;
+ }
+ if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
+ priv->dev_config.socket_id)) {
+ DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
+ (uint32_t)qp_id);
+ rte_free(opaq_buf);
+ rte_errno = ENOMEM;
+ goto err;
+ }
+ qp->entries_n = 1 << log_ops_n;
+ qp->socket_id = socket_id;
+ qp->qp_id = qp_id;
+ qp->priv = priv;
+ qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
+ RTE_CACHE_LINE_SIZE);
+ if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
+ sizeof(struct mlx5_gga_compress_opaque),
+ &qp->opaque_mr) != 0) {
+ rte_free(opaq_buf);
+ DRV_LOG(ERR, "Failed to register opaque MR.");
+ rte_errno = ENOMEM;
+ goto err;
+ }
+ ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
+ socket_id);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Failed to create CQ.");
+ goto err;
+ }
+ sq_attr.cqn = qp->cq.cq->id;
+ ret = mlx5_devx_sq_create(priv->ctx, &qp->sq, log_ops_n, &sq_attr,
+ socket_id);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Failed to create SQ.");
+ goto err;
+ }
+ mlx5_compress_init_sq(qp);
+ ret = mlx5_devx_cmd_modify_sq(qp->sq.sq, &modify_attr);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Can't change SQ state to ready.");
+ goto err;
+ }
+ DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u\n",
+ (uint32_t)qp_id, qp->sq.sq->id, qp->cq.cq->id, qp->entries_n);
+ return 0;
+err:
+ mlx5_compress_qp_release(dev, qp_id);
+ return -1;
+}
+
+static int
+mlx5_compress_xform_free(struct rte_compressdev *dev, void *xform)
+{
+ struct mlx5_compress_priv *priv = dev->data->dev_private;
+
+ rte_spinlock_lock(&priv->xform_sl);
+ LIST_REMOVE((struct mlx5_compress_xform *)xform, next);
+ rte_spinlock_unlock(&priv->xform_sl);
+ rte_free(xform);
+ return 0;
+}
+
+static int
+mlx5_compress_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct mlx5_compress_priv *priv = dev->data->dev_private;
+ struct mlx5_compress_xform *xfrm;
+ uint32_t size;
+
+ if (xform->type == RTE_COMP_COMPRESS && xform->compress.level ==
+ RTE_COMP_LEVEL_NONE) {
+ DRV_LOG(ERR, "Non-compressed block is not supported.");
+ return -ENOTSUP;
+ }
+ if ((xform->type == RTE_COMP_COMPRESS && xform->compress.hash_algo !=
+ RTE_COMP_HASH_ALGO_NONE) || (xform->type == RTE_COMP_DECOMPRESS &&
+ xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE)) {
+ DRV_LOG(ERR, "SHA is not supported.");
+ return -ENOTSUP;
+ }
+ xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0,
+ priv->dev_config.socket_id);
+ if (xfrm == NULL)
+ return -ENOMEM;
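+ /* Base opcode is MMO; the opcode modifier selects DMA, compress or decompress below. */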
+ xfrm->opcode = MLX5_OPCODE_MMO;
+ xfrm->type = xform->type;
+ switch (xform->type) {
+ case RTE_COMP_COMPRESS:
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_NULL:
+ xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
+ WQE_CSEG_OPC_MOD_OFFSET;
+ break;
+ case RTE_COMP_ALGO_DEFLATE:
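+ /* window_size is a log2 value in bytes; convert it to device window-size units. */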
+ size = 1 << xform->compress.window_size;
+ size /= MLX5_GGA_COMP_WIN_SIZE_UNITS;
+ xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size),
+ MLX5_COMP_MAX_WIN_SIZE_CONF) <<
+ WQE_GGA_COMP_WIN_SIZE_OFFSET;
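+ /* Map the compression level to the GGA log block size, capped at the device maximum. */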
+ if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
+ size = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
+ else
+ size = priv->min_block_size - 1 +
+ xform->compress.level;
+ xfrm->gga_ctrl1 += RTE_MIN(size,
+ MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX) <<
+ WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
+ xfrm->opcode += MLX5_OPC_MOD_MMO_COMP <<
+ WQE_CSEG_OPC_MOD_OFFSET;
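+ /* Dynamic Huffman gets the maximum dynamic block size, fixed Huffman the minimum. */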
+ size = xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC ?
+ MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX :
+ MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN;
+ xfrm->gga_ctrl1 += size <<
+ WQE_GGA_COMP_DYNAMIC_SIZE_OFFSET;
+ break;
+ default:
+ goto err;
+ }
+ xfrm->csum_type = xform->compress.chksum;
+ break;
+ case RTE_COMP_DECOMPRESS:
+ switch (xform->decompress.algo) {
+ case RTE_COMP_ALGO_NULL:
+ xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
+ WQE_CSEG_OPC_MOD_OFFSET;
+ break;
+ case RTE_COMP_ALGO_DEFLATE:
+ xfrm->opcode += MLX5_OPC_MOD_MMO_DECOMP <<
+ WQE_CSEG_OPC_MOD_OFFSET;
+ break;
+ default:
+ goto err;
+ }
+ xfrm->csum_type = xform->decompress.chksum;
+ break;
+ default:
+ DRV_LOG(ERR, "Algorithm %u is not supported.", xform->type);
+ goto err;
+ }
+ DRV_LOG(DEBUG, "New xform: gga ctrl1 = 0x%08X opcode = 0x%08X csum "
+ "type = %d.", xfrm->gga_ctrl1, xfrm->opcode, xfrm->csum_type);
+ xfrm->gga_ctrl1 = rte_cpu_to_be_32(xfrm->gga_ctrl1);
+ rte_spinlock_lock(&priv->xform_sl);
+ LIST_INSERT_HEAD(&priv->xform_list, xfrm, next);
+ rte_spinlock_unlock(&priv->xform_sl);
+ *private_xform = xfrm;
+ return 0;
+err:
+ rte_free(xfrm);
+ return -ENOTSUP;
+}
+
+static void
+mlx5_compress_dev_stop(struct rte_compressdev *dev)
+{
+ RTE_SET_USED(dev);
+}
+
+static int
+mlx5_compress_dev_start(struct rte_compressdev *dev)
+{
+ RTE_SET_USED(dev);
+ return 0;
+}
+
+static void
+mlx5_compress_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+static void
+mlx5_compress_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+