See the :ref:`mlx5 common configuration <mlx5_common_env>`.
+A device comes out of the Mellanox factory with a pre-defined import method.
+There are two possible import methods: wrapped or plaintext.
+
+If the device is in wrapped mode, it needs to be moved to crypto operational mode.
In order to move the device to crypto operational mode, credential and KEK
(Key Encrypting Key) should be set as the first step.
The credential will be used by the software in order to perform crypto login, and the KEK is
the AES Key Wrap Algorithm key used to wrap the credential and the data keys before they are imported into the device.
The "wrapped_crypto_operational" value will be "0x00000001" if the mode was
successfully changed to operational mode.
+In plaintext mode, on the other hand, none of the above is needed:
+the DEK is passed in plaintext, without a keytag.
+
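To illustrate the difference from the application's point of view, the sketch below
(a hedged example, not PMD code; the helper name ``setup_xts_xform`` and the
``IV_OFFSET`` constant are illustrative) fills an AES-XTS cipher transform.
In wrapped mode the key buffer carries the wrapped key plus keytag (48 or 80 bytes);
in plaintext mode it carries only the raw key (32 or 64 bytes) and the PMD appends
the keytag taken from the ``keytag`` devarg::

   /* Hedged sketch, not PMD code. */
   #include <string.h>
   #include <rte_cryptodev.h>

   #define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

   static void
   setup_xts_xform(struct rte_crypto_sym_xform *xform,
                   const uint8_t *key_buf, uint16_t key_len)
   {
           memset(xform, 0, sizeof(*xform));
           xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
           xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
           xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
           /* Wrapped mode: 48/80 bytes; plaintext mode: 32/64 bytes. */
           xform->cipher.key.data = key_buf;
           xform->cipher.key.length = key_len;
           xform->cipher.iv.offset = IV_OFFSET; /* tweak location inside the op */
           xform->cipher.iv.length = 16;        /* AES-XTS tweak size */
           xform->cipher.dataunit_len = 512;    /* e.g. one 512B sector per data unit */
   }

The resulting transform is then used for symmetric session creation as with any
other cryptodev.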
The mlx5 crypto PMD can be verified by running the test application::
+ Wrapped mode:
+ dpdk-test -c 1 -n 1 -w <dev>,class=crypto,wcs_file=<file_path>
+ RTE>>cryptodev_mlx5_autotest
- dpdk-test -c 1 -n 1 -w <dev>,class=crypto,wcs_file=<file_path>
- RTE>>cryptodev_mlx5_autotest
+ Plaintext mode:
+ dpdk-test -c 1 -n 1 -w <dev>,class=crypto
+ RTE>>cryptodev_mlx5_autotest
Driver options
Please refer to :ref:`mlx5 common options <mlx5_common_driver_options>`
for an additional list of options shared with other mlx5 drivers.
-- ``wcs_file`` parameter [string] - mandatory
+- ``wcs_file`` parameter [string] - mandatory in wrapped mode
Path to a file containing only the wrapped credential as a string of hexadecimal
digits, representing 48 bytes (including the 8-byte IV added by the AES key wrap algorithm).
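For illustration only, a hypothetical helper (not the PMD's own parser; the helper
name ``read_wrapped_credential`` and the assumption of a bare hexadecimal string
with no ``0x`` prefix or separators are illustrative) that converts the 96
hexadecimal characters of such a file into the 48 raw credential bytes could look
as follows::

   #include <stdio.h>
   #include <stdint.h>
   #include <string.h>

   #define WRAPPED_CRED_BYTES 48

   static int
   read_wrapped_credential(const char *path, uint8_t out[WRAPPED_CRED_BYTES])
   {
           char hex[2 * WRAPPED_CRED_BYTES + 1] = { 0 };
           FILE *f = fopen(path, "r");
           unsigned int i;

           if (f == NULL)
                   return -1;
           if (fscanf(f, "%96s", hex) != 1) {
                   fclose(f);
                   return -1;
           }
           fclose(f);
           if (strlen(hex) != 2 * WRAPPED_CRED_BYTES)
                   return -1;
           for (i = 0; i < WRAPPED_CRED_BYTES; i++)
                   if (sscanf(&hex[2 * i], "%2hhx", &out[i]) != 1)
                           return -1;
           return 0;
   }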
MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled);
attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time);
attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto);
- if (attr->crypto)
- attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts);
attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr,
general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
+ if (attr->crypto) {
+ attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts);
+ hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+ MLX5_GET_HCA_CAP_OP_MOD_CRYPTO |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+ if (!hcattr)
+ return -1;
+ attr->crypto_wrapped_import_method = !!(MLX5_GET(crypto_caps,
+ hcattr, wrapped_import_method)
+ & 1 << 2);
+ }
if (hca_cap_2_sup) {
hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
uint32_t umr_indirect_mkey_disabled:1;
uint32_t log_min_stride_wqe_sz:5;
uint32_t esw_mgr_vport_id_valid:1; /* E-Switch Mgr vport ID is valid. */
+ uint32_t crypto_wrapped_import_method:1;
uint16_t esw_mgr_vport_id; /* E-Switch Mgr vport ID . */
uint16_t max_wqe_sz_sq;
};
MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_CRYPTO = 0x1A << 1,
MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP = 0x1C << 1,
MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
};
u8 reserved_at_280[0x180];
};
+struct mlx5_ifc_crypto_caps_bits {
+ u8 wrapped_crypto_operational[0x1];
+ u8 wrapped_crypto_going_to_commissioning[0x1];
+ u8 sw_wrapped_dek[0x1];
+ u8 synchronize_dek[0x1];
+ u8 int_kek_manual[0x1];
+ u8 int_kek_auto[0x1];
+ u8 reserved_at_6[0x12];
+ u8 wrapped_import_method[0x8];
+ u8 reserved_at_20[0x3];
+ u8 log_dek_max_alloc[0x5];
+ u8 reserved_at_28[0x3];
+ u8 log_max_num_deks[0x5];
+ u8 reserved_at_30[0x3];
+ u8 log_max_num_import_keks[0x5];
+ u8 reserved_at_38[0x3];
+ u8 log_max_num_creds[0x5];
+ u8 failed_selftests[0x10];
+ u8 num_nv_import_keks[0x8];
+ u8 num_nv_credentials[0x8];
+ u8 reserved_at_60[0x3];
+ u8 log_dek_granularity[0x5];
+ u8 reserved_at_68[0x3];
+ u8 log_max_num_int_kek[0x5];
+ u8 reserved_at_70[0x10];
+ u8 reserved_at_80[0x780];
+};
+
struct mlx5_ifc_crypto_commissioning_register_bits {
u8 token[0x1]; /* TODO: add size after PRM update */
};
#define MLX5_CRYPTO_MAX_QPS 128
#define MLX5_CRYPTO_MAX_SEGS 56
-#define MLX5_CRYPTO_FEATURE_FLAGS \
+#define MLX5_CRYPTO_FEATURE_FLAGS(wrapped_mode) \
(RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | \
RTE_CRYPTODEV_FF_IN_PLACE_SGL | RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | \
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | \
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | \
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | \
- RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY | \
+ (wrapped_mode ? RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY : 0) | \
RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS)
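Because RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY is now advertised only when the device
uses the wrapped import method, an application can detect at runtime which key form
to supply. A minimal sketch (the helper name dev_needs_wrapped_keys is illustrative,
not part of the PMD or this patch):

	/* Query the device info and check whether wrapped keys are expected. */
	#include <stdbool.h>
	#include <rte_cryptodev.h>

	static bool
	dev_needs_wrapped_keys(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(dev_id, &info);
		return (info.feature_flags & RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY) != 0;
	}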
TAILQ_HEAD(mlx5_crypto_privs, mlx5_crypto_priv) mlx5_crypto_priv_list =
mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *dev_info)
{
+ struct mlx5_crypto_priv *priv = dev->data->dev_private;
+
RTE_SET_USED(dev);
if (dev_info != NULL) {
dev_info->driver_id = mlx5_crypto_driver_id;
- dev_info->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
+ dev_info->feature_flags =
+ MLX5_CRYPTO_FEATURE_FLAGS(priv->is_wrapped_mode);
dev_info->capabilities = mlx5_crypto_caps;
dev_info->max_nb_queue_pairs = MLX5_CRYPTO_MAX_QPS;
dev_info->min_mbuf_headroom_req = 0;
static int
mlx5_crypto_parse_devargs(struct mlx5_kvargs_ctrl *mkvlist,
- struct mlx5_crypto_devarg_params *devarg_prms)
+ struct mlx5_crypto_devarg_params *devarg_prms,
+ bool wrapped_mode)
{
struct mlx5_devx_crypto_login_attr *attr = &devarg_prms->login_attr;
const char **params = (const char *[]){
devarg_prms->keytag = 0;
devarg_prms->max_segs_num = 8;
if (mkvlist == NULL) {
+ if (!wrapped_mode)
+ return 0;
DRV_LOG(ERR,
"No login devargs in order to enable crypto operations in the device.");
rte_errno = EINVAL;
rte_errno = EINVAL;
return -1;
}
- if (devarg_prms->login_devarg == false) {
+ if (devarg_prms->login_devarg == false && wrapped_mode) {
DRV_LOG(ERR,
- "No login credential devarg in order to enable crypto operations in the device.");
+ "No login credential devarg in order to enable crypto operations in the device while in wrapped import method.");
rte_errno = EINVAL;
return -1;
}
};
const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
int ret;
+ bool wrapped_mode;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DRV_LOG(ERR, "Non-primary process type is not supported.");
rte_errno = ENOTSUP;
return -ENOTSUP;
}
- ret = mlx5_crypto_parse_devargs(mkvlist, &devarg_prms);
+ wrapped_mode = !!cdev->config.hca_attr.crypto_wrapped_import_method;
+ ret = mlx5_crypto_parse_devargs(mkvlist, &devarg_prms, wrapped_mode);
if (ret) {
DRV_LOG(ERR, "Failed to parse devargs.");
return -rte_errno;
crypto_dev->dev_ops = &mlx5_crypto_ops;
crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;
crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;
- crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
+ crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS(wrapped_mode);
crypto_dev->driver_id = mlx5_crypto_driver_id;
priv = crypto_dev->data->dev_private;
priv->cdev = cdev;
priv->crypto_dev = crypto_dev;
+ priv->is_wrapped_mode = wrapped_mode;
if (mlx5_devx_uar_prepare(cdev, &priv->uar) != 0) {
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
}
- login = mlx5_devx_cmd_create_crypto_login_obj(cdev->ctx,
+ if (wrapped_mode) {
+ login = mlx5_devx_cmd_create_crypto_login_obj(cdev->ctx,
&devarg_prms.login_attr);
- if (login == NULL) {
- DRV_LOG(ERR, "Failed to configure login.");
- mlx5_devx_uar_release(&priv->uar);
- rte_cryptodev_pmd_destroy(priv->crypto_dev);
- return -rte_errno;
+ if (login == NULL) {
+ DRV_LOG(ERR, "Failed to configure login.");
+ mlx5_devx_uar_release(&priv->uar);
+ rte_cryptodev_pmd_destroy(priv->crypto_dev);
+ return -rte_errno;
+ }
+ priv->login_obj = login;
}
- priv->login_obj = login;
- priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
ret = mlx5_crypto_configure_wqe_size(priv,
cdev->config.hca_attr.max_wqe_sz_sq, devarg_prms.max_segs_num);
if (ret) {
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
}
+ priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
DRV_LOG(INFO, "Max number of segments: %u.",
(unsigned int)RTE_MIN(
MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size),
uint16_t umr_wqe_size;
uint16_t umr_wqe_stride;
uint16_t max_rdmar_ds;
+ uint32_t is_wrapped_mode:1;
};
struct mlx5_crypto_qp {
struct mlx5_list_entry entry; /* Pointer to DEK hash list entry. */
struct mlx5_devx_obj *obj; /* Pointer to DEK DevX object. */
uint8_t data[MLX5_CRYPTO_KEY_LENGTH]; /* DEK key data. */
- bool size_is_48; /* Whether the key\data size is 48 bytes or not. */
+ uint32_t size; /* key+keytag size. */
} __rte_cache_aligned;
struct mlx5_crypto_devarg_params {
struct rte_crypto_cipher_xform *cipher_ctx = ctx->cipher;
struct mlx5_crypto_dek *dek =
container_of(entry, typeof(*dek), entry);
- uint32_t key_len = dek->size_is_48 ? 48 : 80;
+ uint32_t key_len = dek->size;
if (key_len != cipher_ctx->key.length)
return -1;
- return memcmp(cipher_ctx->key.data, dek->data, key_len);
+ return memcmp(cipher_ctx->key.data, dek->data, cipher_ctx->key.length);
}
static struct mlx5_list_entry *
.key_purpose = MLX5_CRYPTO_KEY_PURPOSE_AES_XTS,
.has_keytag = 1,
};
+ bool is_wrapped = ctx->priv->is_wrapped_mode;
if (dek == NULL) {
DRV_LOG(ERR, "Failed to allocate dek memory.");
return NULL;
}
- switch (cipher_ctx->key.length) {
- case 48:
- dek->size_is_48 = true;
- dek_attr.key_size = MLX5_CRYPTO_KEY_SIZE_128b;
- break;
- case 80:
- dek->size_is_48 = false;
- dek_attr.key_size = MLX5_CRYPTO_KEY_SIZE_256b;
- break;
- default:
- DRV_LOG(ERR, "Key size not supported.");
- return NULL;
+ if (is_wrapped) {
+ switch (cipher_ctx->key.length) {
+ case 48:
+ dek->size = 48;
+ dek_attr.key_size = MLX5_CRYPTO_KEY_SIZE_128b;
+ break;
+ case 80:
+ dek->size = 80;
+ dek_attr.key_size = MLX5_CRYPTO_KEY_SIZE_256b;
+ break;
+ default:
+ DRV_LOG(ERR, "Wrapped key size not supported.");
+ return NULL;
+ }
+ } else {
+ switch (cipher_ctx->key.length) {
+ case 32:
+ dek->size = 40;
+ dek_attr.key_size = MLX5_CRYPTO_KEY_SIZE_128b;
+ break;
+ case 64:
+ dek->size = 72;
+ dek_attr.key_size = MLX5_CRYPTO_KEY_SIZE_256b;
+ break;
+ default:
+ DRV_LOG(ERR, "Key size not supported.");
+ return NULL;
+ }
+ memcpy(&dek_attr.key[cipher_ctx->key.length],
+ &ctx->priv->keytag, 8);
}
memcpy(&dek_attr.key, cipher_ctx->key.data, cipher_ctx->key.length);
dek->obj = mlx5_devx_cmd_create_dek_obj(ctx->priv->cdev->ctx,