crypto/mlx5: add maximum segments configuration
authorSuanming Mou <suanmingm@nvidia.com>
Tue, 20 Jul 2021 13:09:38 +0000 (16:09 +0300)
committerAkhil Goyal <gakhil@marvell.com>
Tue, 20 Jul 2021 20:27:00 +0000 (22:27 +0200)
The mlx5 HW crypto operations are done by attaching a crypto property
to a memory region. Once attached, every access to the memory via the
crypto-enabled memory region results in inline encryption or
decryption of the data.

As a result, the design choice is to provide two types of WQEs. One is
a UMR WQE, which sets the crypto property, and the other is an RDMA
write WQE, which sends a DMA command to copy data from the local MR to
the remote MR.

The size of these WQEs is defined by a new devarg called
max_segs_num.

This devarg also defines the maximum number of segments in an mbuf
chain that is supported for crypto operations.
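
For example, the devarg can be passed through the standard EAL device
arguments (illustrative values; the PCI address is hypothetical and the
mandatory login devargs are omitted for brevity):

    <dpdk-app> -a 0000:04:00.0,class=crypto,max_segs_num=16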

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
doc/guides/cryptodevs/mlx5.rst
drivers/crypto/mlx5/mlx5_crypto.c
drivers/crypto/mlx5/mlx5_crypto.h

index 2476fa6..2199ac7 100644
@@ -127,6 +127,10 @@ Driver options
 
   The plaintext of the keytag appended to the AES-XTS keys, default value is 0.
 
+- ``max_segs_num`` parameter [int]
+
+  Maximum number of mbuf chain segments (src or dest), default value is 8.
+
 
 Supported NICs
 --------------
index b24e685..f7e23d1 100644
@@ -21,6 +21,7 @@
 #define MLX5_CRYPTO_DRIVER_NAME crypto_mlx5
 #define MLX5_CRYPTO_LOG_NAME pmd.crypto.mlx5
 #define MLX5_CRYPTO_MAX_QPS 1024
+#define MLX5_CRYPTO_MAX_SEGS 56
 
 #define MLX5_CRYPTO_FEATURE_FLAGS \
        (RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | \
@@ -494,14 +495,24 @@ mlx5_crypto_args_check_handler(const char *key, const char *val, void *opaque)
                DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
                return -errno;
        }
-       if (strcmp(key, "import_kek_id") == 0)
+       if (strcmp(key, "max_segs_num") == 0) {
+               if (!tmp || tmp > MLX5_CRYPTO_MAX_SEGS) {
+                       DRV_LOG(WARNING, "Invalid max_segs_num: %u, must"
+                               " be between 1 and %d.",
+                               (uint32_t)tmp, MLX5_CRYPTO_MAX_SEGS);
+                       rte_errno = EINVAL;
+                       return -rte_errno;
+               }
+               devarg_prms->max_segs_num = (uint32_t)tmp;
+       } else if (strcmp(key, "import_kek_id") == 0) {
                attr->session_import_kek_ptr = (uint32_t)tmp;
-       else if (strcmp(key, "credential_id") == 0)
+       } else if (strcmp(key, "credential_id") == 0) {
                attr->credential_pointer = (uint32_t)tmp;
-       else if (strcmp(key, "keytag") == 0)
+       } else if (strcmp(key, "keytag") == 0) {
                devarg_prms->keytag = tmp;
-       else
+       } else {
                DRV_LOG(WARNING, "Invalid key %s.", key);
+       }
        return 0;
 }
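
For reference, a handler of this shape is driven once per key=value
pair through the rte_kvargs API. Below is a minimal sketch of the
equivalent flow; the driver's own mlx5_crypto_parse_devargs() iterates
the parsed list to the same effect, and mlx5_crypto_parse_example is a
hypothetical name used only for illustration:

    #include <errno.h>
    #include <rte_kvargs.h>

    static int
    mlx5_crypto_parse_example(const char *args,
                              struct mlx5_crypto_devarg_params *devarg_prms)
    {
        /* e.g. args = "keytag=1,max_segs_num=16" */
        struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);
        int ret;

        if (kvlist == NULL)
            return -EINVAL;
        /* A NULL key pattern invokes the handler for every pair. */
        ret = rte_kvargs_process(kvlist, NULL,
                                 mlx5_crypto_args_check_handler, devarg_prms);
        rte_kvargs_free(kvlist);
        return ret;
    }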
 
@@ -516,6 +527,7 @@ mlx5_crypto_parse_devargs(struct rte_devargs *devargs,
        attr->credential_pointer = 0;
        attr->session_import_kek_ptr = 0;
        devarg_prms->keytag = 0;
+       devarg_prms->max_segs_num = 8;
        if (devargs == NULL) {
                DRV_LOG(ERR,
        "No login devargs in order to enable crypto operations in the device.");
@@ -612,6 +624,7 @@ mlx5_crypto_pci_probe(struct rte_pci_driver *pci_drv,
                .max_nb_queue_pairs =
                                RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
        };
+       uint16_t rdmw_wqe_size;
        int ret;
 
        RTE_SET_USED(pci_drv);
@@ -690,6 +703,18 @@ mlx5_crypto_pci_probe(struct rte_pci_driver *pci_drv,
        priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
        priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
        priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
+       priv->max_segs_num = devarg_prms.max_segs_num;
+       priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
+                            sizeof(struct mlx5_umr_wqe) +
+                            RTE_ALIGN(priv->max_segs_num, 4) *
+                            sizeof(struct mlx5_wqe_dseg);
+       rdmw_wqe_size = sizeof(struct mlx5_rdma_write_wqe) +
+                             sizeof(struct mlx5_wqe_dseg) *
+                             (priv->max_segs_num <= 2 ? 2 : 2 +
+                              RTE_ALIGN(priv->max_segs_num - 2, 4));
+       priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;
+       priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
+       priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
        /* Register callback function for global shared MR cache management. */
        if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
                rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
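
With the default max_segs_num of 8, and assuming the PRM segment sizes
from mlx5_prm.h (16B ctrl/remote-address/data segments, 48B UMR ctrl
segment, 64B mkey context and BSF segments, 64B send WQE basic block;
these constants are assumptions stated for illustration), the formulas
above work out as in this standalone sketch:

    #include <stdio.h>

    /* Assumed PRM sizes, for illustration only (see mlx5_prm.h). */
    #define CSEG     16 /* sizeof(struct mlx5_wqe_cseg) */
    #define RSEG     16 /* sizeof(struct mlx5_wqe_rseg) */
    #define DSEG     16 /* sizeof(struct mlx5_wqe_dseg) */
    #define UMR_CSEG 48 /* UMR control segment */
    #define MKC      64 /* mkey context segment */
    #define BSF      64 /* sizeof(struct mlx5_wqe_umr_bsf_seg) */
    #define WQE_BB   64 /* MLX5_SEND_WQE_BB */
    #define ALIGN4(v) (((v) + 3u) & ~3u) /* RTE_ALIGN(v, 4) */

    int main(void)
    {
        unsigned int segs = 8; /* default max_segs_num */
        /* UMR WQE: ctrl + UMR ctrl + mkey context + BSF + klm dsegs. */
        unsigned int umr = BSF + (CSEG + UMR_CSEG + MKC) +
                           ALIGN4(segs) * DSEG;
        /* RDMA write WQE: ctrl + remote address + data segments. */
        unsigned int rdmw = (CSEG + RSEG) + DSEG *
                            (segs <= 2 ? 2 : 2 + ALIGN4(segs - 2));

        printf("umr_wqe_size=%u (stride %u BBs)\n", umr, umr / WQE_BB);
        printf("rdmw_wqe_size=%u (%u dsegs), wqe_set_size=%u\n",
               rdmw, rdmw / DSEG, umr + rdmw);
        return 0;
    }

With these assumed sizes, the defaults give a 320B UMR WQE (stride of
5 basic blocks), a 192B RDMA write WQE (12 data segments), and a 512B
WQE set per crypto operation.
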
index a513e9e..c0fa8ad 100644
@@ -25,12 +25,17 @@ struct mlx5_crypto_priv {
        struct rte_cryptodev *crypto_dev;
        void *uar; /* User Access Region. */
        uint32_t pdn; /* Protection Domain number. */
+       uint32_t max_segs_num; /* Maximum supported data segs. */
        struct ibv_pd *pd;
        struct mlx5_hlist *dek_hlist; /* Dek hash list. */
        struct rte_cryptodev_config dev_config;
        struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
        struct mlx5_devx_obj *login_obj;
        uint64_t keytag;
+       uint16_t wqe_set_size; /* One crypto op: UMR + RDMA write WQEs, bytes. */
+       uint16_t umr_wqe_size; /* UMR WQE size in bytes. */
+       uint16_t umr_wqe_stride; /* UMR WQE size in send WQE basic blocks. */
+       uint16_t max_rdmar_ds; /* Max data segments in the RDMA write WQE. */
 };
 
 struct mlx5_crypto_qp {
@@ -54,6 +59,7 @@ struct mlx5_crypto_devarg_params {
        bool login_devarg;
        struct mlx5_devx_crypto_login_attr login_attr;
        uint64_t keytag;
+       uint32_t max_segs_num;
 };
 
 int