#include <rte_cryptodev_pmd.h>
#include <rte_string_fns.h>
-#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#include <rte_cryptodev_scheduler_operations.h>
#endif
#include "test_cryptodev_aead_test_vectors.h"
#include "test_cryptodev_hmac_test_vectors.h"
#include "test_cryptodev_mixed_test_vectors.h"
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
#include "test_cryptodev_security_pdcp_test_vectors.h"
#include "test_cryptodev_security_pdcp_sdap_test_vectors.h"
#include "test_cryptodev_security_pdcp_test_func.h"
struct rte_crypto_sym_xform cipher_xform;
struct rte_crypto_sym_xform auth_xform;
struct rte_crypto_sym_xform aead_xform;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
struct rte_security_docsis_xform docsis_xform;
#endif
union {
struct rte_cryptodev_sym_session *sess;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
struct rte_security_session *sec_session;
#endif
};
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
enum rte_security_session_action_type type;
#endif
struct rte_crypto_op *op;
return (num_bits >> 3);
}
-static uint32_t
-get_raw_dp_dequeue_count(void *user_data __rte_unused)
-{
- return 1;
-}
-
static void
post_process_raw_dp_op(void *user_data, uint32_t index __rte_unused,
uint8_t is_op_success)
n = n_success = 0;
while (count++ < MAX_RAW_DEQUEUE_COUNT && n == 0) {
n = rte_cryptodev_raw_dequeue_burst(ctx,
- get_raw_dp_dequeue_count, post_process_raw_dp_op,
+ NULL, 1, post_process_raw_dp_op,
(void **)&ret_op, 0, &n_success,
&dequeue_status);
if (dequeue_status < 0) {
}
}
-#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#ifdef RTE_CRYPTO_SCHEDULER
char vdev_args[VDEV_ARGS_SIZE] = {""};
char temp_str[VDEV_ARGS_SIZE] = {"mode=multi-core,"
"ordering=enable,name=cryptodev_test_scheduler,corelist="};
/* Identify the Worker Cores
* Use 2 worker cores for the device args
*/
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
if (worker_core_count > 1)
break;
snprintf(vdev_args, sizeof(vdev_args),
i, RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
}
}
-#endif /* RTE_LIBRTE_PMD_CRYPTO_SCHEDULER */
+#endif /* RTE_CRYPTO_SCHEDULER */
nb_devs = rte_cryptodev_count();
if (nb_devs < 1) {
unsigned int session_size =
rte_cryptodev_sym_get_private_session_size(dev_id);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
unsigned int security_session_size = rte_security_session_get_size(
rte_cryptodev_get_sec_ctx(dev_id));
testsuite_teardown(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int res;
if (ts_params->mbuf_pool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_MBUFPOOL count %u\n",
rte_mempool_free(ts_params->session_mpool);
ts_params->session_mpool = NULL;
}
+
+ res = rte_cryptodev_close(ts_params->valid_devs[0]);
+ if (res)
+ RTE_LOG(ERR, USER1, "Crypto device close error %d\n", res);
}
static int
struct rte_cryptodev_stats stats;
/* free crypto session structure */
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (ut_params->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
if (ut_params->sec_session) {
rte_security_session_destroy(rte_cryptodev_get_sec_ctx
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Create KASUMI session */
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Create KASUMI session */
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
uint64_t feat_flags = dev_info.feature_flags;
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Create KASUMI session */
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Create SNOW 3G session */
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
uint64_t feat_flags = dev_info.feature_flags;
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Create SNOW 3G session */
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Create SNOW 3G session */
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Check if device supports ZUC EEA3 */
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
&cap_idx) == NULL)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
uint64_t feat_flags = dev_info.feature_flags;
&cap_idx) == NULL)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
uint64_t feat_flags = dev_info.feature_flags;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
if (op_mode == OUT_OF_PLACE) {
if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
return -ENOTSUP;
&cap_idx) == NULL)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
uint64_t feat_flags = dev_info.feature_flags;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Verify the capabilities */
struct rte_cryptodev_sym_capability_idx cap_idx;
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
struct rte_cryptodev_sym_capability_idx cap_idx;
/* Check if device supports ZUC EEA3 */
&cap_idx) == NULL)
return -ENOTSUP;
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
uint64_t feat_flags = dev_info.feature_flags;
return -ENOTSUP;
}
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+ return -ENOTSUP;
+
/* Check if device supports ZUC EIA3 */
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
cap_idx.algo.auth = RTE_CRYPTO_AUTH_ZUC_EIA3;
test_snow3g_decryption_with_digest_test_case_1(void)
{
struct snow3g_hash_test_data snow3g_hash_data;
+ struct rte_cryptodev_info dev_info;
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+ rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+ uint64_t feat_flags = dev_info.feature_flags;
+
+ if (!(feat_flags & RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED)) {
+ printf("Device doesn't support encrypted digest operations.\n");
+ return -ENOTSUP;
+ }
/*
* Function prepare data for hash veryfication test case.
unsigned int ciphertext_len;
struct rte_cryptodev_info dev_info;
+ struct rte_crypto_op *op;
/* Check if device supports particular algorithms separately */
if (test_mixed_check_if_unsupported(tdata))
return -ENOTSUP;
}
- if (op_mode == OUT_OF_PLACE)
- return -ENOTSUP;
-
/* Create the session */
if (verify)
retval = create_wireless_algo_cipher_auth_session(
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ op = process_crypto_request(ts_params->valid_devs[0], ut_params->op);
/* Check if the op failed because the device doesn't */
/* support this particular combination of algorithms */
- if (ut_params->op == NULL && ut_params->op->status ==
+ if (op == NULL && ut_params->op->status ==
RTE_CRYPTO_OP_STATUS_INVALID_SESSION) {
printf("Device doesn't support this mixed combination. "
"Test Skipped.\n");
return -ENOTSUP;
}
+ ut_params->op = op;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
uint8_t digest_buffer[10000];
struct rte_cryptodev_info dev_info;
+ struct rte_crypto_op *op;
/* Check if device supports particular algorithms */
if (test_mixed_check_if_unsupported(tdata))
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ op = process_crypto_request(ts_params->valid_devs[0], ut_params->op);
/* Check if the op failed because the device doesn't */
/* support this particular combination of algorithms */
- if (ut_params->op == NULL && ut_params->op->status ==
+ if (op == NULL && ut_params->op->status ==
RTE_CRYPTO_OP_STATUS_INVALID_SESSION) {
printf("Device doesn't support this mixed combination. "
"Test Skipped.\n");
return -ENOTSUP;
}
+ ut_params->op = op;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
static int
security_proto_supported(enum rte_security_session_action_type action,
enum rte_security_session_protocol proto)
return TEST_SUCCESS;
}
+/*
+ * Dummy enqueue callback: logs that it was invoked and reports all ops
+ * as accepted so the burst proceeds unchanged.
+ */
+static uint16_t
+test_enq_callback(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
+		uint16_t nb_ops, void *user_param)
+{
+	RTE_SET_USED(dev_id);
+	RTE_SET_USED(qp_id);
+	RTE_SET_USED(ops);
+	RTE_SET_USED(user_param);
+
+	printf("crypto enqueue callback called\n");
+	return nb_ops;
+}
+
+/*
+ * Dummy dequeue callback: logs that it was invoked and passes the burst
+ * through unchanged by reporting all ops.
+ */
+static uint16_t
+test_deq_callback(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
+		uint16_t nb_ops, void *user_param)
+{
+	RTE_SET_USED(dev_id);
+	RTE_SET_USED(qp_id);
+	RTE_SET_USED(ops);
+	RTE_SET_USED(user_param);
+
+	printf("crypto dequeue callback called\n");
+	return nb_ops;
+}
+
+/*
+ * Thread using enqueue/dequeue callback with RCU.
+ *
+ * Runs on a worker lcore as the data-path side of the callback tests:
+ * the null burst drives rte_cryptodev_enqueue_burst()/
+ * rte_cryptodev_dequeue_burst(), which invoke any registered callbacks.
+ */
+static int
+test_enqdeq_callback_thread(void *arg)
+{
+	RTE_SET_USED(arg);
+	/* DP thread calls rte_cryptodev_enqueue_burst()/
+	 * rte_cryptodev_dequeue_burst() and invokes callback.
+	 */
+	test_null_burst_operation();
+	return 0;
+}
+
+/*
+ * Validate rte_cryptodev_add_enq_callback()/rte_cryptodev_remove_enq_callback()
+ * on queue pair 0: invalid device id, out-of-range qp id and NULL callback
+ * must all be rejected; a valid callback must install, fire while a worker
+ * lcore runs a null burst, and then remove cleanly.
+ * Returns TEST_SUCCESS, or fails via the TEST_ASSERT_* macros.
+ */
+static int
+test_enq_callback_setup(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_cryptodev_info dev_info;
+	struct rte_cryptodev_qp_conf qp_conf = {
+		.nb_descriptors = MAX_NUM_OPS_INFLIGHT
+	};
+
+	struct rte_cryptodev_cb *cb;
+	uint16_t qp_id = 0;
+
+	/* Stop the device in case it's started so it can be configured */
+	rte_cryptodev_stop(ts_params->valid_devs[0]);
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
+			&ts_params->conf),
+			"Failed to configure cryptodev %u",
+			ts_params->valid_devs[0]);
+
+	qp_conf.nb_descriptors = MAX_NUM_OPS_INFLIGHT;
+	qp_conf.mp_session = ts_params->session_mpool;
+	qp_conf.mp_session_private = ts_params->session_priv_mpool;
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+			ts_params->valid_devs[0], qp_id, &qp_conf,
+			rte_cryptodev_socket_id(ts_params->valid_devs[0])),
+			"Failed test for "
+			"rte_cryptodev_queue_pair_setup: num_inflights "
+			"%u on qp %u on cryptodev %u",
+			qp_conf.nb_descriptors, qp_id,
+			ts_params->valid_devs[0]);
+
+	/* Test with invalid crypto device */
+	cb = rte_cryptodev_add_enq_callback(RTE_CRYPTO_MAX_DEVS,
+			qp_id, test_enq_callback, NULL);
+	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+			"cryptodev %u did not fail",
+			qp_id, RTE_CRYPTO_MAX_DEVS);
+
+	/* Test with invalid queue pair */
+	cb = rte_cryptodev_add_enq_callback(ts_params->valid_devs[0],
+			dev_info.max_nb_queue_pairs + 1,
+			test_enq_callback, NULL);
+	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+			"cryptodev %u did not fail",
+			dev_info.max_nb_queue_pairs + 1,
+			ts_params->valid_devs[0]);
+
+	/* Test with NULL callback */
+	cb = rte_cryptodev_add_enq_callback(ts_params->valid_devs[0],
+			qp_id, NULL, NULL);
+	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+			"cryptodev %u did not fail",
+			qp_id, ts_params->valid_devs[0]);
+
+	/* Test with valid configuration */
+	cb = rte_cryptodev_add_enq_callback(ts_params->valid_devs[0],
+			qp_id, test_enq_callback, NULL);
+	TEST_ASSERT_NOT_NULL(cb, "Failed test to add callback on "
+			"qp %u on cryptodev %u",
+			qp_id, ts_params->valid_devs[0]);
+
+	rte_cryptodev_start(ts_params->valid_devs[0]);
+
+	/* Launch a thread */
+	rte_eal_remote_launch(test_enqdeq_callback_thread, NULL,
+			rte_get_next_lcore(-1, 1, 0));
+
+	/* Wait until reader exited. Presumably this acts as the RCU grace
+	 * period so no lcore is still inside the callback when it is
+	 * removed below — confirm against the cryptodev callback docs.
+	 */
+	rte_eal_mp_wait_lcore();
+
+	/* Test with invalid crypto device */
+	TEST_ASSERT_FAIL(rte_cryptodev_remove_enq_callback(
+			RTE_CRYPTO_MAX_DEVS, qp_id, cb),
+			"Expected call to fail as crypto device is invalid");
+
+	/* Test with invalid queue pair */
+	TEST_ASSERT_FAIL(rte_cryptodev_remove_enq_callback(
+			ts_params->valid_devs[0],
+			dev_info.max_nb_queue_pairs + 1, cb),
+			"Expected call to fail as queue pair is invalid");
+
+	/* Test with NULL callback */
+	TEST_ASSERT_FAIL(rte_cryptodev_remove_enq_callback(
+			ts_params->valid_devs[0], qp_id, NULL),
+			"Expected call to fail as callback is NULL");
+
+	/* Test with valid configuration */
+	TEST_ASSERT_SUCCESS(rte_cryptodev_remove_enq_callback(
+			ts_params->valid_devs[0], qp_id, cb),
+			"Failed test to remove callback on "
+			"qp %u on cryptodev %u",
+			qp_id, ts_params->valid_devs[0]);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Validate rte_cryptodev_add_deq_callback()/rte_cryptodev_remove_deq_callback()
+ * on queue pair 0: invalid device id, out-of-range qp id and NULL callback
+ * must all be rejected; a valid callback must install, fire while a worker
+ * lcore runs a null burst, and then remove cleanly.
+ * Returns TEST_SUCCESS, or fails via the TEST_ASSERT_* macros.
+ */
+static int
+test_deq_callback_setup(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_cryptodev_info dev_info;
+	struct rte_cryptodev_qp_conf qp_conf = {
+		.nb_descriptors = MAX_NUM_OPS_INFLIGHT
+	};
+
+	struct rte_cryptodev_cb *cb;
+	uint16_t qp_id = 0;
+
+	/* Stop the device in case it's started so it can be configured */
+	rte_cryptodev_stop(ts_params->valid_devs[0]);
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
+			&ts_params->conf),
+			"Failed to configure cryptodev %u",
+			ts_params->valid_devs[0]);
+
+	qp_conf.nb_descriptors = MAX_NUM_OPS_INFLIGHT;
+	qp_conf.mp_session = ts_params->session_mpool;
+	qp_conf.mp_session_private = ts_params->session_priv_mpool;
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+			ts_params->valid_devs[0], qp_id, &qp_conf,
+			rte_cryptodev_socket_id(ts_params->valid_devs[0])),
+			"Failed test for "
+			"rte_cryptodev_queue_pair_setup: num_inflights "
+			"%u on qp %u on cryptodev %u",
+			qp_conf.nb_descriptors, qp_id,
+			ts_params->valid_devs[0]);
+
+	/* Test with invalid crypto device */
+	cb = rte_cryptodev_add_deq_callback(RTE_CRYPTO_MAX_DEVS,
+			qp_id, test_deq_callback, NULL);
+	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+			"cryptodev %u did not fail",
+			qp_id, RTE_CRYPTO_MAX_DEVS);
+
+	/* Test with invalid queue pair */
+	cb = rte_cryptodev_add_deq_callback(ts_params->valid_devs[0],
+			dev_info.max_nb_queue_pairs + 1,
+			test_deq_callback, NULL);
+	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+			"cryptodev %u did not fail",
+			dev_info.max_nb_queue_pairs + 1,
+			ts_params->valid_devs[0]);
+
+	/* Test with NULL callback */
+	cb = rte_cryptodev_add_deq_callback(ts_params->valid_devs[0],
+			qp_id, NULL, NULL);
+	TEST_ASSERT_NULL(cb, "Add callback on qp %u on "
+			"cryptodev %u did not fail",
+			qp_id, ts_params->valid_devs[0]);
+
+	/* Test with valid configuration */
+	cb = rte_cryptodev_add_deq_callback(ts_params->valid_devs[0],
+			qp_id, test_deq_callback, NULL);
+	TEST_ASSERT_NOT_NULL(cb, "Failed test to add callback on "
+			"qp %u on cryptodev %u",
+			qp_id, ts_params->valid_devs[0]);
+
+	rte_cryptodev_start(ts_params->valid_devs[0]);
+
+	/* Launch a thread */
+	rte_eal_remote_launch(test_enqdeq_callback_thread, NULL,
+			rte_get_next_lcore(-1, 1, 0));
+
+	/* Wait until reader exited. Presumably this acts as the RCU grace
+	 * period so no lcore is still inside the callback when it is
+	 * removed below — confirm against the cryptodev callback docs.
+	 */
+	rte_eal_mp_wait_lcore();
+
+	/* Test with invalid crypto device */
+	TEST_ASSERT_FAIL(rte_cryptodev_remove_deq_callback(
+			RTE_CRYPTO_MAX_DEVS, qp_id, cb),
+			"Expected call to fail as crypto device is invalid");
+
+	/* Test with invalid queue pair */
+	TEST_ASSERT_FAIL(rte_cryptodev_remove_deq_callback(
+			ts_params->valid_devs[0],
+			dev_info.max_nb_queue_pairs + 1, cb),
+			"Expected call to fail as queue pair is invalid");
+
+	/* Test with NULL callback */
+	TEST_ASSERT_FAIL(rte_cryptodev_remove_deq_callback(
+			ts_params->valid_devs[0], qp_id, NULL),
+			"Expected call to fail as callback is NULL");
+
+	/* Test with valid configuration */
+	TEST_ASSERT_SUCCESS(rte_cryptodev_remove_deq_callback(
+			ts_params->valid_devs[0], qp_id, cb),
+			"Failed test to remove callback on "
+			"qp %u on cryptodev %u",
+			qp_id, ts_params->valid_devs[0]);
+
+	return TEST_SUCCESS;
+}
+
static void
generate_gmac_large_plaintext(uint8_t *data)
{
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
feature_flags = dev_info.feature_flags;
- if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) &&
- (!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) &&
+ if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) ||
+ (!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) ||
(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)))
return -ENOTSUP;
return test_authenticated_decryption(&chacha20_poly1305_case_rfc8439);
}
-#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#ifdef RTE_CRYPTO_SCHEDULER
/* global AESNI worker IDs for the scheduler test */
uint8_t aesni_ids[2];
}
};
-#endif /* RTE_LIBRTE_PMD_CRYPTO_SCHEDULER */
+#endif /* RTE_CRYPTO_SCHEDULER */
static struct unit_test_suite cryptodev_testsuite = {
.suite_name = "Crypto Unit Test Suite",
test_queue_pair_descriptor_setup),
TEST_CASE_ST(ut_setup, ut_teardown,
test_device_configure_invalid_queue_pair_ids),
-
TEST_CASE_ST(ut_setup, ut_teardown,
test_multi_session),
TEST_CASE_ST(ut_setup, ut_teardown,
TEST_CASE_ST(ut_setup, ut_teardown,
test_null_invalid_operation),
TEST_CASE_ST(ut_setup, ut_teardown, test_null_burst_operation),
-
TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_3DES_chain_all),
TEST_CASE_ST(ut_setup, ut_teardown,
test_verify_auth_aes_cmac_cipher_null_test_case_1),
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
TEST_CASE_ST(ut_setup_security, ut_teardown,
test_PDCP_PROTO_all),
TEST_CASE_ST(ut_setup_security, ut_teardown,
test_DOCSIS_PROTO_all),
#endif
+ TEST_CASE_ST(ut_setup, ut_teardown, test_enq_callback_setup),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_deq_callback_setup),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
return unit_test_suite_runner(&cryptodev_mrvl_testsuite);
}
-#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#ifdef RTE_CRYPTO_SCHEDULER
static int
test_cryptodev_scheduler(void /*argv __rte_unused, int argc __rte_unused*/)
RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
- "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
- "are enabled in config file to run this testsuite.\n");
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded.\n");
return TEST_SKIPPED;
}