/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
- RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
+ RTE_LOG(ERR, USER1, "Can't create MBUF_OFFLOAD_POOL\n");
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
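Reviewer aid, not part of the patch: the session API keeps its old shape and only gains the sym infix. A minimal sketch of the renamed lifecycle, assuming a configured device id dev_id and a populated transform chain xform (both placeholder names):

	struct rte_cryptodev_sym_session *sess;

	/* create a symmetric session from a transform chain */
	sess = rte_cryptodev_sym_session_create(dev_id, xform);
	if (sess == NULL)
		rte_exit(EXIT_FAILURE, "session creation failed\n");

	/* ... attach sess to ops and run enqueue/dequeue bursts ... */

	/* release the session on the device that created it */
	rte_cryptodev_sym_session_free(dev_id, sess);
	sess = NULL;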
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create crypto session*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
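For illustration only, outside the patch: the session-less path renames in lockstep, with the offload type and the xform allocator both gaining the SYM tag. A hedged sketch of preparing a session-less cipher-then-hash op, assuming an offload pool mbuf_ol_pool like the one created above:

	struct rte_mbuf_offload *ol;

	/* allocate a symmetric crypto offload from the pool */
	ol = rte_pktmbuf_offload_alloc(mbuf_ol_pool,
			RTE_PKTMBUF_OL_CRYPTO_SYM);
	if (ol == NULL)
		return -ENOMEM;

	/* reserve an inline two-element transform chain */
	if (rte_pktmbuf_offload_alloc_crypto_sym_xforms(ol, 2) == NULL)
		return -ENOMEM;

	/* first transform ciphers, the chained one authenticates */
	ol->op.crypto.xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	ol->op.crypto.xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;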
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ sizeof(struct rte_cryptodev_sym_session *) *
+ (dev_info.sym.max_nb_sessions + 1), 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
+ ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
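Reviewer aid, not part of the patch: the function above only inspects the type fields of the first two transforms, so the mapping is easy to illustrate with a stack-built chain:

	struct rte_crypto_sym_xform cipher, auth;

	cipher.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher.next = &auth;
	auth.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth.next = NULL;

	/* aesni_mb_get_chain_order(&cipher) returns CIPHER_HASH; linking
	 * the chain the other way round (auth.next = &cipher with
	 * cipher.next = NULL) returns HASH_CIPHER; any other shape,
	 * including a single transform, returns -1. */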
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->stats.enqueue_err_count++;
goto flush_jobs;
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
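For illustration only, outside the patch: the same two-element chains map directly onto QAT firmware commands, e.g.:

	struct rte_crypto_sym_xform cipher, auth;

	cipher.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher.next = &auth;
	auth.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth.next = NULL;

	/* qat_get_cmd_id(&cipher) returns ICP_QAT_FW_LA_CMD_CIPHER_HASH
	 * and the reverse chain returns ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	 * single-transform chains still return -1, since the cipher-only
	 * and auth-only cases are commented out above. */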
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_cryptodev_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
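A sketch of the caller's side, not part of the patch: after dequeue, the status written back above can be inspected through the offload, with m standing for one dequeued mbuf (placeholder name):

	struct rte_mbuf_offload *ol =
		rte_pktmbuf_offload_get(m, RTE_PKTMBUF_OL_CRYPTO_SYM);

	/* drop packets whose digest verification failed, for example */
	if (ol != NULL &&
			ol->op.crypto.status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		rte_pktmbuf_free(m);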
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
struct rte_crypto_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
unsigned digest_length;
unsigned block_size;
-
struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
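Reviewer aid, not part of the patch: a hedged sketch of how the helper above is consumed, with cparams and cdev_id following the names used elsewhere in l2fwd-crypto:

	/* bind the symmetric session to the per-lcore parameters used by
	 * the forwarding loop */
	cparams->session = initialize_crypto_session(options, cdev_id);
	if (cparams->session == NULL)
		return -1;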
static void
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
+ * Defines the crypto operation status values which are common to all
+ * crypto operation types; the symmetric algorithm and operation
+ * definitions now live in rte_crypto_sym.h.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
/** Status of crypto operation */
enum rte_crypto_op_status {
RTE_CRYPTO_OP_STATUS_SUCCESS,
/**< Error handling operation */
};
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/** Crypto key structure */
+struct rte_crypto_key {
+ uint8_t *data; /**< pointer to key data */
+ phys_addr_t phys_addr;
+ size_t length; /**< key length in bytes */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * used to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct rte_crypto_key key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ */
+};
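As a point of reference, a minimal sketch of filling this transform for AES-CBC encryption; the key bytes and the 128-bit key size are illustrative choices, not values mandated by the API:

	/* Sketch: AES-CBC encrypt transform with a 128-bit key.
	 * The key contents below are placeholders only. */
	uint8_t cipher_key[16] = { 0 };

	struct rte_crypto_cipher_xform cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.key = {
			.data = cipher_key,
			.length = sizeof(cipher_key), /* 16, 24 or 32 for AES */
		},
	};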
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context, or the op_params parameter of the crypto
+ * operation data structure MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the op_params parameter of the crypto operation data structure
+ * MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the op_params parameter of the crypto operation data structure
+ * MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 160 bit SHA-1 algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 160 bit SHA-1 algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transform. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct rte_crypto_key key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the caller's responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op_data structure.
+ */
+};
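For example, a hedged sketch of an HMAC-SHA1 generate transform; the 64-byte key is an illustrative choice at the SHA-1 block size, and 20 bytes is the full (untruncated) SHA-1 digest:

	/* Sketch: HMAC-SHA1 digest generation. Key bytes are
	 * placeholders; the key must not exceed the block size. */
	uint8_t hmac_key[64] = { 0 };

	struct rte_crypto_auth_xform auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.key = {
			.data = hmac_key,
			.length = sizeof(hmac_key),
		},
		.digest_length = 20,		/* full SHA-1 digest */
		.add_auth_data_length = 0,	/* unused for HMAC */
	};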
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required. Multiple transforms
+ * can be chained together to specify a chain of transforms such as
+ * authentication then cipher, or cipher then authentication. Each transform
+ * structure holds a single transform; the type field specifies which
+ * transform is contained within the union.
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type; /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
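A sketch of chaining the two transforms sketched above into a cipher-then-authenticate chain; note that next must terminate in NULL and that type selects the active union member in each element:

	/* Sketch: cipher first, then authentication. */
	struct rte_crypto_sym_xform auth_xform = {
		.next = NULL,
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = auth,		/* from the earlier sketch */
	};

	struct rte_crypto_sym_xform cipher_xform = {
		.next = &auth_xform,	/* cipher, then authenticate */
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = cipher,	/* from the earlier sketch */
	};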
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has a session structure attached for immutable parameters or if
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with the
+ * rte_crypto_sym_enqueue_burst() call to perform cipher, hash, or combined
+ * cipher and hash operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field should
+ * be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field should
+ * be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set, it is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should set up this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write the flags into the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should set up this field as follows:
+ *
+ * - the AAD is written starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
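Tying the pieces together, a hedged sketch of populating an operation for the AES-CBC plus HMAC-SHA1 case; the mbuf m, the 64-byte payload length and the op pointer (for example &ol->op.crypto from an offload, shown later) are assumptions of this example, and rte_malloc()/rte_malloc_virt2phy() come from rte_malloc.h:

	/* Sketch: cipher and hash over the first 64 bytes of m,
	 * digest appended immediately after the hashed region. */
	uint8_t *iv = rte_malloc(NULL, 16, 8);	/* 8-byte aligned IV */

	op->data.to_cipher.offset = 0;
	op->data.to_cipher.length = 64;	/* multiple of AES block size */
	op->data.to_hash.offset = 0;
	op->data.to_hash.length = 64;

	op->iv.data = iv;
	op->iv.phys_addr = rte_malloc_virt2phy(iv);
	op->iv.length = 16;		/* AES block size */

	op->digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, 64);
	op->digest.phys_addr = rte_pktmbuf_mtophys_offset(m, 64);
	op->digest.length = 20;		/* SHA-1 digest size */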
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
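A brief usage sketch of the two helpers, assuming op points at a symmetric op and sess came from rte_cryptodev_sym_session_create() (declared further below):

	/* Sketch: reset to session-less defaults, then attach. */
	__rte_crypto_sym_op_reset(op);
	rte_crypto_sym_op_attach_session(op, sess);
	/* op->type is now RTE_CRYPTO_SYM_OP_WITH_SESSION */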
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
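Since the session mempool is now created as part of device configuration, applications size it through the config structure; a sketch with illustrative values (device id 0 and the pool sizes are placeholders, not recommendations):

	/* Sketch: configure a device, sizing its session pool. */
	struct rte_cryptodev_config conf = {
		.socket_id = SOCKET_ID_ANY,
		.nb_queue_pairs = 2,		/* placeholder */
		.session_mp = {
			.nb_objs = 2048,	/* sessions to allocate */
			.cache_size = 64,	/* per-lcore cache */
		},
	};

	if (rte_cryptodev_configure(0, &conf) < 0)
		rte_exit(EXIT_FAILURE, "cryptodev configure failed\n");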
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /* obj constructor */
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
/* Logging Macros */
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
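Existing callers pick up the new location of the capability with a one-line change; a sketch, where dev_id is assumed to be a valid device identifier:

	/* Sketch: query the symmetric session limit of a device. */
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	printf("max sym sessions: %u\n", info.sym.max_nb_sessions);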
#define RTE_CRYPTODEV_DETACHED (0)
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /**< Crypto device type session was created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
* Free the memory associated with a previously allocated session.
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
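Taken together, the renamed entry points give the following lifecycle; a sketch assuming device 0 is configured and started and cipher_xform is a populated transform chain as in the earlier sketches:

	/* Sketch: create, use and free a symmetric session. */
	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(0, &cipher_xform);
	if (sess == NULL)
		rte_exit(EXIT_FAILURE, "session creation failed\n");

	/* ... attach sess to ops and enqueue bursts ... */

	if (rte_cryptodev_sym_session_free(0, sess) != NULL)
		printf("warning: session was not freed\n");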
#ifdef __cplusplus
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* - On success returns 0
* - On failure returns a negative value
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return size of private session structure. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
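For PMD authors, the renamed callback types slot into the ops table as before; a hedged sketch in which the my_pmd_* handlers are hypothetical placeholders and unrelated members are elided:

	/* Sketch: wiring session ops in a hypothetical PMD. */
	static struct rte_cryptodev_ops my_pmd_ops = {
		/* ... device and queue-pair ops elided ... */
		.session_get_size = my_pmd_session_get_size,
		.session_initialize = my_pmd_session_init,
		.session_configure = my_pmd_session_configure,
		.session_clear = my_pmd_session_clear,
	};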
rte_cryptodev_pmd_driver_register;
rte_cryptodev_pmd_release_device;
rte_cryptodev_pmd_virtual_dev_init;
- rte_cryptodev_session_create;
- rte_cryptodev_session_free;
+ rte_cryptodev_sym_session_create;
+ rte_cryptodev_sym_session_free;
rte_cryptodev_socket_id;
rte_cryptodev_start;
rte_cryptodev_stats_get;
rte_cryptodev_queue_pair_stop;
local: *;
-};
\ No newline at end of file
+};
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Symmetric crypto offload operation */
};
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
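With the union member retyped, the symmetric op is still reached through the offload; a sketch assuming ol_pool, sess and mbuf m already exist (rte_pktmbuf_offload_attach() is the existing helper that links the offload to the mbuf):

	/* Sketch: allocate a symmetric crypto offload for mbuf m. */
	struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(ol_pool,
			RTE_PKTMBUF_OL_CRYPTO_SYM);
	if (ol == NULL)
		rte_exit(EXIT_FAILURE, "offload alloc failed\n");

	struct rte_crypto_sym_op *op = &ol->op.crypto;
	rte_crypto_sym_op_attach_session(op, sess);
	rte_pktmbuf_offload_attach(m, ol);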
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);