diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c
index 4c8e67c..c4081c5 100644
--- a/drivers/compress/mlx5/mlx5_compress.c
+++ b/drivers/compress/mlx5/mlx5_compress.c
@@ -39,12 +39,10 @@ struct mlx5_compress_priv {
        struct mlx5_common_device *cdev; /* Backend mlx5 device. */
        void *uar;
        uint8_t min_block_size;
-       uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
        /* Minimum huffman block size supported by the device. */
        struct rte_compressdev_config dev_config;
        LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
        rte_spinlock_t xform_sl;
-       struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
        volatile uint64_t *uar_addr;
        /* HCA caps*/
        uint32_t mmo_decomp_sq:1;
@@ -207,8 +205,8 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                return -rte_errno;
        }
        dev->data->queue_pairs[qp_id] = qp;
-       if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
-                              priv->dev_config.socket_id)) {
+       if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
+                             priv->dev_config.socket_id)) {
                DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
                        (uint32_t)qp_id);
                rte_errno = ENOMEM;
@@ -243,7 +241,8 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                goto err;
        }
        qp_attr.cqn = qp->cq.cq->id;
-       qp_attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+       qp_attr.ts_format =
+               mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        qp_attr.rq_size = 0;
        qp_attr.sq_size = RTE_BIT32(log_ops_n);
        qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
@@ -258,8 +257,6 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
        ret = mlx5_devx_qp2rts(&qp->qp, 0);
        if (ret)
                goto err;
-       /* Save pointer of global generation number to check memory event. */
-       qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
        DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u",
                (uint32_t)qp_id, qp->qp.qp->id, qp->cq.cq->id, qp->entries_n);
        return 0;
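
Note: the two MR-control edits in this function pair up. The new mlx5_mr_ctrl_init() call takes over both the B-tree allocation and the saving of the global generation pointer that the deleted lines at the end of QP setup used to do by hand. A minimal sketch of what such a helper consolidates, inferred purely from the two call sites it replaces here (the real body lives in drivers/common/mlx5 and may differ):

/*
 * Sketch only, reconstructed from the replaced calls: store the pointer
 * to the global cache generation number and set up the per-queue
 * bottom-half B-tree on the queue's NUMA socket.
 */
static int
mr_ctrl_init_sketch(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
		    int socket)
{
	if (mr_ctrl == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Checked on every lookup to detect global cache changes. */
	mr_ctrl->dev_gen_ptr = dev_gen_ptr;
	return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
				  socket);
}
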
@@ -289,17 +286,44 @@ mlx5_compress_xform_create(struct rte_compressdev *dev,
        struct mlx5_compress_xform *xfrm;
        uint32_t size;
 
-       if (xform->type == RTE_COMP_COMPRESS && xform->compress.level ==
-                                                         RTE_COMP_LEVEL_NONE) {
-               DRV_LOG(ERR, "Non-compressed block is not supported.");
-               return -ENOTSUP;
-       }
-       if ((xform->type == RTE_COMP_COMPRESS && xform->compress.hash_algo !=
-            RTE_COMP_HASH_ALGO_NONE) || (xform->type == RTE_COMP_DECOMPRESS &&
-                     xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE)) {
-               DRV_LOG(ERR, "SHA is not supported.");
+       switch (xform->type) {
+       case RTE_COMP_COMPRESS:
+               if (xform->compress.algo == RTE_COMP_ALGO_NULL &&
+                               !priv->mmo_dma_qp && !priv->mmo_dma_sq) {
+                       DRV_LOG(ERR, "Not enough capabilities to support DMA operation, maybe old FW/OFED version?");
+                       return -ENOTSUP;
+               } else if (!priv->mmo_comp_qp && !priv->mmo_comp_sq) {
+                       DRV_LOG(ERR, "Not enough capabilities to support compress operation, maybe old FW/OFED version?");
+                       return -ENOTSUP;
+               }
+               if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
+                       DRV_LOG(ERR, "Non-compressed block is not supported.");
+                       return -ENOTSUP;
+               }
+               if (xform->compress.hash_algo != RTE_COMP_HASH_ALGO_NONE) {
+                       DRV_LOG(ERR, "SHA is not supported.");
+                       return -ENOTSUP;
+               }
+               break;
+       case RTE_COMP_DECOMPRESS:
+               if (xform->decompress.algo == RTE_COMP_ALGO_NULL &&
+                               !priv->mmo_dma_qp && !priv->mmo_dma_sq) {
+                       DRV_LOG(ERR, "Not enough capabilities to support DMA operation, maybe old FW/OFED version?");
+                       return -ENOTSUP;
+               } else if (!priv->mmo_decomp_qp && !priv->mmo_decomp_sq) {
+                       DRV_LOG(ERR, "Not enough capabilities to support decompress operation, maybe old FW/OFED version?");
+                       return -ENOTSUP;
+               }
+               if (xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE) {
+                       DRV_LOG(ERR, "SHA is not supported.");
+                       return -ENOTSUP;
+               }
+               break;
+       default:
+               DRV_LOG(ERR, "Xform type should be compress/decompress");
                return -ENOTSUP;
        }
+
        xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0,
                                                    priv->dev_config.socket_id);
        if (xfrm == NULL)
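
Note: the validation rework above means probe no longer has to fail when only part of the MMO capability set is present; each xform is checked against the specific engine it needs. A hypothetical application-side example (dev_id and error handling are placeholders; names come from rte_comp.h): a NULL-algorithm xform, i.e. a DMA pass-through, is now rejected at private-xform creation unless the DMA MMO capability is reported.

#include <rte_comp.h>
#include <rte_compressdev.h>

/* Hypothetical example: with the checks above, this returns -ENOTSUP
 * unless the device reports the mmo_dma_qp/mmo_dma_sq capability. */
static int
create_null_xform(uint8_t dev_id, void **priv_xform)
{
	struct rte_comp_xform xform = {
		.type = RTE_COMP_COMPRESS,
		.compress = {
			.algo = RTE_COMP_ALGO_NULL,
			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE,
		},
	};

	return rte_compressdev_private_xform_create(dev_id, &xform,
						    priv_xform);
}
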
@@ -389,8 +413,9 @@ mlx5_compress_dev_stop(struct rte_compressdev *dev)
 static int
 mlx5_compress_dev_start(struct rte_compressdev *dev)
 {
-       RTE_SET_USED(dev);
-       return 0;
+       struct mlx5_compress_priv *priv = dev->data->dev_private;
+
+       return mlx5_dev_mempool_subscribe(priv->cdev);
 }
 
 static void
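
Note: device start now does real work. mlx5_dev_mempool_subscribe() registers the common device for mempool lifecycle events, so mbuf mempools get DMA-mapped as they appear, replacing the PMD-private MR cache this series removes. A hypothetical caller-side sketch (dev_id, queue depth, and config values are placeholders), just to show where the subscribe call is triggered from:

#include <rte_compressdev.h>

/* Hypothetical usage sketch: mlx5_compress_dev_start(), and with it
 * mlx5_dev_mempool_subscribe(), runs inside rte_compressdev_start(),
 * so the standard start sequence is all an application needs. */
static int
start_compressdev(uint8_t dev_id)
{
	struct rte_compressdev_config cfg = {
		.socket_id = SOCKET_ID_ANY,
		.nb_queue_pairs = 1,
		.max_nb_priv_xforms = 1,
		.max_nb_streams = 0,
	};

	if (rte_compressdev_configure(dev_id, &cfg) < 0)
		return -1;
	if (rte_compressdev_queue_pair_setup(dev_id, 0, 64, SOCKET_ID_ANY) < 0)
		return -1;
	/* Mempool event subscription happens during this call. */
	return rte_compressdev_start(dev_id);
}
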
@@ -437,40 +462,6 @@ static struct rte_compressdev_ops mlx5_compress_ops = {
        .stream_free            = NULL,
 };
 
-/**
- * Query LKey from a packet buffer for QP. If not found, add the mempool.
- *
- * @param priv
- *   Pointer to the priv object.
- * @param addr
- *   Search key.
- * @param mr_ctrl
- *   Pointer to per-queue MR control structure.
- * @param ol_flags
- *   Mbuf offload features.
- *
- * @return
- *   Searched LKey on success, UINT32_MAX on no match.
- */
-static __rte_always_inline uint32_t
-mlx5_compress_addr2mr(struct mlx5_compress_priv *priv, uintptr_t addr,
-                     struct mlx5_mr_ctrl *mr_ctrl, uint64_t ol_flags)
-{
-       uint32_t lkey;
-
-       /* Check generation bit to see if there's any change on existing MRs. */
-       if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
-               mlx5_mr_flush_local_cache(mr_ctrl);
-       /* Linear search on MR cache array. */
-       lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
-                                  MLX5_MR_CACHE_N, addr);
-       if (likely(lkey != UINT32_MAX))
-               return lkey;
-       /* Take slower bottom-half on miss. */
-       return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
-                                 addr, !!(ol_flags & EXT_ATTACHED_MBUF));
-}
-
 static __rte_always_inline uint32_t
 mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
                       volatile struct mlx5_wqe_dseg *restrict dseg,
@@ -480,8 +471,7 @@ mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
        uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);
 
        dseg->bcount = rte_cpu_to_be_32(len);
-       dseg->lkey = mlx5_compress_addr2mr(qp->priv, addr, &qp->mr_ctrl,
-                                          mbuf->ol_flags);
+       dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf);
        dseg->pbuf = rte_cpu_to_be_64(addr);
        return dseg->lkey;
 }
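
Note: the deleted mlx5_compress_addr2mr() above survives in spirit. mlx5_mr_mb2mr() keeps the same generation-check-then-linear-search fast path, with the miss path now resolving against the shared cdev MR cache instead of the PMD-private one. A sketch of that fast path, reconstructed from the deleted helper (the name of the common miss-path entry point is an assumption):

/* Sketch mirroring the deleted fast path; mb2mr_miss_bh() stands in for
 * whatever bottom-half the common MR code provides for the shared cache. */
static __rte_always_inline uint32_t
mb2mr_sketch(struct mlx5_common_device *cdev, struct mlx5_mr_ctrl *mr_ctrl,
	     struct rte_mbuf *mbuf)
{
	uintptr_t addr = (uintptr_t)rte_pktmbuf_mtod(mbuf, void *);
	uint32_t lkey;

	/* Flush the local cache if the global generation number moved on. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on the per-queue MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Miss: take the slower bottom-half against the shared cache. */
	return mb2mr_miss_bh(cdev, mr_ctrl, mbuf);
}
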
@@ -715,47 +705,12 @@ mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)
        return 0;
 }
 
-/**
- * Callback for memory event.
- *
- * @param event_type
- *   Memory event type.
- * @param addr
- *   Address of memory.
- * @param len
- *   Size of memory.
- */
-static void
-mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
-                             size_t len, void *arg __rte_unused)
-{
-       struct mlx5_compress_priv *priv;
-
-       /* Must be called from the primary process. */
-       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-       switch (event_type) {
-       case RTE_MEM_EVENT_FREE:
-               pthread_mutex_lock(&priv_list_lock);
-               /* Iterate all the existing mlx5 devices. */
-               TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
-                       mlx5_free_mr_by_addr(&priv->mr_scache,
-                                            mlx5_os_get_ctx_device_name
-                                                             (priv->cdev->ctx),
-                                            addr, len);
-               pthread_mutex_unlock(&priv_list_lock);
-               break;
-       case RTE_MEM_EVENT_ALLOC:
-       default:
-               break;
-       }
-}
-
 static int
 mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
 {
        struct rte_compressdev *compressdev;
        struct mlx5_compress_priv *priv;
-       struct mlx5_hca_attr att = { 0 };
+       struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
        struct rte_compressdev_pmd_init_params init_params = {
                .name = "",
                .socket_id = cdev->dev->numa_node,
@@ -767,12 +722,11 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
-       if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &att) != 0 ||
-           ((att.mmo_compress_sq_en == 0 || att.mmo_decompress_sq_en == 0 ||
-               att.mmo_dma_sq_en == 0) && (att.mmo_compress_qp_en == 0 ||
-               att.mmo_decompress_qp_en == 0 || att.mmo_dma_qp_en == 0))) {
-               DRV_LOG(ERR, "Not enough capabilities to support compress "
-                       "operations, maybe old FW/OFED version?");
+       if (!attr->mmo_decompress_qp_en && !attr->mmo_decompress_sq_en
+               && !attr->mmo_compress_qp_en && !attr->mmo_compress_sq_en
+               && !attr->mmo_dma_qp_en && !attr->mmo_dma_sq_en) {
+               DRV_LOG(ERR, "Not enough capabilities to support compress operations, maybe old FW/OFED version?");
+               claim_zero(mlx5_glue->close_device(cdev->ctx));
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
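
Note the semantic change in this gate: the old check failed probe unless a complete SQ capability set or a complete QP capability set was present, while the new one only bails out when no MMO engine exists at all, deferring per-operation errors to mlx5_compress_xform_create(). A hypothetical predicate spelling the new condition out:

#include <stdbool.h>

/* Hypothetical helper restating the relaxed probe gate above: any single
 * MMO capability is enough to register the device. */
static bool
has_any_mmo_cap(const struct mlx5_hca_attr *attr)
{
	return attr->mmo_decompress_qp_en || attr->mmo_decompress_sq_en ||
	       attr->mmo_compress_qp_en || attr->mmo_compress_sq_en ||
	       attr->mmo_dma_qp_en || attr->mmo_dma_sq_en;
}
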
@@ -789,35 +743,19 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
        compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
        compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
        priv = compressdev->data->dev_private;
-       priv->mmo_decomp_sq = att.mmo_decompress_sq_en;
-       priv->mmo_decomp_qp = att.mmo_decompress_qp_en;
-       priv->mmo_comp_sq = att.mmo_compress_sq_en;
-       priv->mmo_comp_qp = att.mmo_compress_qp_en;
-       priv->mmo_dma_sq = att.mmo_dma_sq_en;
-       priv->mmo_dma_qp = att.mmo_dma_qp_en;
+       priv->mmo_decomp_sq = attr->mmo_decompress_sq_en;
+       priv->mmo_decomp_qp = attr->mmo_decompress_qp_en;
+       priv->mmo_comp_sq = attr->mmo_compress_sq_en;
+       priv->mmo_comp_qp = attr->mmo_compress_qp_en;
+       priv->mmo_dma_sq = attr->mmo_dma_sq_en;
+       priv->mmo_dma_qp = attr->mmo_dma_qp_en;
        priv->cdev = cdev;
        priv->compressdev = compressdev;
-       priv->min_block_size = att.compress_min_block_size;
-       priv->qp_ts_format = att.qp_ts_format;
+       priv->min_block_size = attr->compress_min_block_size;
        if (mlx5_compress_uar_prepare(priv) != 0) {
                rte_compressdev_pmd_destroy(priv->compressdev);
                return -1;
        }
-       if (mlx5_mr_btree_init(&priv->mr_scache.cache,
-                            MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
-               DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
-               mlx5_compress_uar_release(priv);
-               rte_compressdev_pmd_destroy(priv->compressdev);
-               rte_errno = ENOMEM;
-               return -rte_errno;
-       }
-       priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
-       priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
-       /* Register callback function for global shared MR cache management. */
-       if (TAILQ_EMPTY(&mlx5_compress_priv_list))
-               rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-                                               mlx5_compress_mr_mem_event_cb,
-                                               NULL);
        pthread_mutex_lock(&priv_list_lock);
        TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
@@ -837,10 +775,6 @@ mlx5_compress_dev_remove(struct mlx5_common_device *cdev)
                TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
        if (priv) {
-               if (TAILQ_EMPTY(&mlx5_compress_priv_list))
-                       rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
-                                                         NULL);
-               mlx5_mr_release_cache(&priv->mr_scache);
                mlx5_compress_uar_release(priv);
                rte_compressdev_pmd_destroy(priv->compressdev);
        }