#define MLX5_COMPRESS_MAX_QPS 1024
#define MLX5_COMP_MAX_WIN_SIZE_CONF 6u
+struct mlx5_compress_devarg_params {
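+ /* Log (base 2) of the compress block size ("log-block-size" devarg). */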
+ uint32_t log_block_sz;
+};
+
struct mlx5_compress_xform {
LIST_ENTRY(mlx5_compress_xform) next;
enum rte_comp_xform_type type;
TAILQ_ENTRY(mlx5_compress_priv) next;
struct rte_compressdev *compressdev;
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
- void *uar;
+ struct mlx5_uar uar;
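+ /* DevX UAR object and doorbell register data. */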
uint8_t min_block_size;
/* Minimum huffman block size supported by the device. */
struct rte_compressdev_config dev_config;
LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
rte_spinlock_t xform_sl;
- volatile uint64_t *uar_addr;
- /* HCA caps*/
+ /* HCA caps */
uint32_t mmo_decomp_sq:1;
uint32_t mmo_decomp_qp:1;
uint32_t mmo_comp_sq:1;
uint32_t mmo_comp_qp:1;
uint32_t mmo_dma_sq:1;
uint32_t mmo_dma_qp:1;
-#ifndef RTE_ARCH_64
- rte_spinlock_t uar32_sl;
-#endif /* RTE_ARCH_64 */
+ uint32_t log_block_sz;
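+ /* Log of the block size configured via the "log-block-size" devarg. */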
};
struct mlx5_compress_qp {
struct mlx5_compress_priv *priv = dev->data->dev_private;
struct mlx5_compress_qp *qp;
struct mlx5_devx_cq_attr cq_attr = {
- .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
+ .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
};
struct mlx5_devx_qp_attr qp_attr = {
.pd = priv->cdev->pdn,
- .uar_index = mlx5_os_get_devx_uar_page_id(priv->uar),
+ .uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
.user_index = qp_id,
};
uint32_t log_ops_n = rte_log2_u32(max_inflight_ops);
return -rte_errno;
}
dev->data->queue_pairs[qp_id] = qp;
- if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
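+ /* Bind the queue MR B-tree to the device-level MR cache. */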
+ if (mlx5_mr_ctrl_init(&qp->mr_ctrl, priv->cdev,
priv->dev_config.socket_id)) {
DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
(uint32_t)qp_id);
qp_attr.cqn = qp->cq.cq->id;
qp_attr.ts_format =
mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
- qp_attr.rq_size = 0;
- qp_attr.sq_size = RTE_BIT32(log_ops_n);
+ qp_attr.num_of_receive_wqes = 0;
+ qp_attr.num_of_send_wqbbs = RTE_BIT32(log_ops_n);
qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
&& priv->mmo_dma_qp;
- ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp, log_ops_n, &qp_attr,
- socket_id);
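+ /* The common QP helper takes the SQ buffer size in bytes. */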
+ ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp,
+ qp_attr.num_of_send_wqbbs *
+ MLX5_WQE_SIZE, &qp_attr, socket_id);
if (ret != 0) {
DRV_LOG(ERR, "Failed to create QP.");
goto err;
xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size),
MLX5_COMP_MAX_WIN_SIZE_CONF) <<
WQE_GGA_COMP_WIN_SIZE_OFFSET;
- switch (xform->compress.level) {
- case RTE_COMP_LEVEL_PMD_DEFAULT:
- size = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
- break;
- case RTE_COMP_LEVEL_MAX:
- size = priv->min_block_size;
- break;
- default:
- size = RTE_MAX(MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX
- + 1 - xform->compress.level,
- priv->min_block_size);
- }
- xfrm->gga_ctrl1 += RTE_MIN(size,
- MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX) <<
- WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
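+ /* Block size is fixed at probe time by the "log-block-size" devarg. */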
+ size = priv->log_block_sz;
+ xfrm->gga_ctrl1 += size <<
+ WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
xfrm->opcode += MLX5_OPC_MOD_MMO_COMP <<
WQE_CSEG_OPC_MOD_OFFSET;
size = xform->compress.deflate.huffman ==
uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);
dseg->bcount = rte_cpu_to_be_32(len);
- dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf);
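+ /* Resolve the mbuf lkey through the per-queue MR cache. */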
+ dseg->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);
dseg->pbuf = rte_cpu_to_be_64(addr);
return dseg->lkey;
}
-/*
- * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
- * 64bit architectures.
- */
-static __rte_always_inline void
-mlx5_compress_uar_write(uint64_t val, struct mlx5_compress_priv *priv)
-{
-#ifdef RTE_ARCH_64
- *priv->uar_addr = val;
-#else /* !RTE_ARCH_64 */
- rte_spinlock_lock(&priv->uar32_sl);
- *(volatile uint32_t *)priv->uar_addr = val;
- rte_io_wmb();
- *((volatile uint32_t *)priv->uar_addr + 1) = val >> 32;
- rte_spinlock_unlock(&priv->uar32_sl);
-#endif
-}
-
static uint16_t
mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
uint16_t nb_ops)
qp->pi++;
} while (--remain);
qp->stats.enqueued_count += nb_ops;
- rte_io_wmb();
- qp->qp.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
- rte_wmb();
- mlx5_compress_uar_write(*(volatile uint64_t *)wqe, qp->priv);
- rte_wmb();
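+ /* Ring the doorbell; the common helper issues the required barriers. */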
+ mlx5_doorbell_ring(&qp->priv->uar.bf_db, *(volatile uint64_t *)wqe,
+ qp->pi, &qp->qp.db_rec[MLX5_SND_DBR],
+ !qp->priv->uar.dbnc);
return nb_ops;
}
return i;
}
-static void
-mlx5_compress_uar_release(struct mlx5_compress_priv *priv)
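+/* Handler for the "log-block-size" devarg; unknown keys are ignored. */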
+static int
+mlx5_compress_args_check_handler(const char *key, const char *val, void *opaque)
{
- if (priv->uar != NULL) {
- mlx5_glue->devx_free_uar(priv->uar);
- priv->uar = NULL;
+ struct mlx5_compress_devarg_params *devarg_prms = opaque;
+
+ if (strcmp(key, "log-block-size") == 0) {
+ errno = 0;
+ devarg_prms->log_block_sz = (uint32_t)strtoul(val, NULL, 10);
+ if (errno) {
+ DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer."
+ , key, val);
+ return -errno;
+ }
+ return 0;
}
+ return 0;
}
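+/* Parse devargs; an out-of-range log block size falls back to the maximum. */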
static int
-mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)
+mlx5_compress_handle_devargs(struct rte_devargs *devargs,
+ struct mlx5_compress_devarg_params *devarg_prms,
+ struct mlx5_hca_attr *att)
{
- priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
- if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
- NULL) {
- rte_errno = errno;
- DRV_LOG(ERR, "Failed to allocate UAR.");
+ struct rte_kvargs *kvlist;
+
+ devarg_prms->log_block_sz = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
+ if (devargs == NULL)
+ return 0;
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL) {
+ DRV_LOG(ERR, "Failed to parse devargs.");
+ rte_errno = EINVAL;
return -1;
}
- priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
- MLX5_ASSERT(priv->uar_addr);
-#ifndef RTE_ARCH_64
- rte_spinlock_init(&priv->uar32_sl);
-#endif /* RTE_ARCH_64 */
+ if (rte_kvargs_process(kvlist, NULL, mlx5_compress_args_check_handler,
+ devarg_prms) != 0) {
+ DRV_LOG(ERR, "Devargs handler function Failed.");
+ rte_kvargs_free(kvlist);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ rte_kvargs_free(kvlist);
+ if (devarg_prms->log_block_sz > MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX ||
+ devarg_prms->log_block_sz < att->compress_min_block_size) {
+ DRV_LOG(WARNING, "Log block size provided is out of range("
+ "%u); default it to %u.",
+ devarg_prms->log_block_sz,
+ MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX);
+ devarg_prms->log_block_sz = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
+ }
return 0;
}
struct rte_compressdev *compressdev;
struct mlx5_compress_priv *priv;
struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
+ struct mlx5_compress_devarg_params devarg_prms = {0};
struct rte_compressdev_pmd_init_params init_params = {
.name = "",
.socket_id = cdev->dev->numa_node,
rte_errno = ENOTSUP;
return -ENOTSUP;
}
+ mlx5_compress_handle_devargs(cdev->dev->devargs, &devarg_prms, attr);
compressdev = rte_compressdev_pmd_create(ibdev_name, cdev->dev,
sizeof(*priv), &init_params);
if (compressdev == NULL) {
compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
priv = compressdev->data->dev_private;
+ priv->log_block_sz = devarg_prms.log_block_sz;
priv->mmo_decomp_sq = attr->mmo_decompress_sq_en;
priv->mmo_decomp_qp = attr->mmo_decompress_qp_en;
priv->mmo_comp_sq = attr->mmo_compress_sq_en;
priv->cdev = cdev;
priv->compressdev = compressdev;
priv->min_block_size = attr->compress_min_block_size;
- if (mlx5_compress_uar_prepare(priv) != 0) {
+ if (mlx5_devx_uar_prepare(cdev, &priv->uar) != 0) {
rte_compressdev_pmd_destroy(priv->compressdev);
return -1;
}
TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
if (priv) {
- mlx5_compress_uar_release(priv);
+ mlx5_devx_uar_release(&priv->uar);
rte_compressdev_pmd_destroy(priv->compressdev);
}
return 0;