tile: fix build
[dpdk.git] / app / test / test_cryptodev_perf.c
index b0c8abf..7f1adf8 100644
 
 #include <rte_common.h>
 #include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 
 #include <rte_crypto.h>
 #include <rte_cryptodev.h>
 #include <rte_cycles.h>
-#include <rte_hexdump.h>
 
 #include "test.h"
 #include "test_cryptodev.h"
+#include "test_cryptodev_gcm_test_vectors.h"
 
 
 #define PERF_NUM_OPS_INFLIGHT          (128)
@@ -50,7 +49,7 @@
 
 struct crypto_testsuite_params {
        struct rte_mempool *mbuf_mp;
-       struct rte_mempool *mbuf_ol_pool;
+       struct rte_mempool *op_mpool;
 
        uint16_t nb_queue_pairs;
 
@@ -59,6 +58,78 @@ struct crypto_testsuite_params {
        uint8_t dev_id;
 };
 
+enum chain_mode {
+       CIPHER_HASH,
+       HASH_CIPHER,
+       CIPHER_ONLY,
+       HASH_ONLY
+};
+
+
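+/*
+ * Symmetric operation test vector; field naming assumed from usage:
+ * iv = initialisation vector, aad = additional authenticated data,
+ * p = plaintext, c = ciphertext, t = authentication tag (digest).
+ */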
+struct symmetric_op {
+       const uint8_t *iv_data;
+       uint32_t iv_len;
+
+       const uint8_t *aad_data;
+       uint32_t aad_len;
+
+       const uint8_t *p_data;
+       uint32_t p_len;
+
+       const uint8_t *c_data;
+       uint32_t c_len;
+
+       const uint8_t *t_data;
+       uint32_t t_len;
+
+};
+
+struct symmetric_session_attrs {
+       enum rte_crypto_cipher_operation cipher;
+       enum rte_crypto_auth_operation auth;
+
+       enum rte_crypto_cipher_algorithm cipher_algorithm;
+       const uint8_t *key_cipher_data;
+       uint32_t key_cipher_len;
+
+       enum rte_crypto_auth_algorithm auth_algorithm;
+       const uint8_t *key_auth_data;
+       uint32_t key_auth_len;
+
+       uint32_t digest_len;
+};
+
+#define ALIGN_POW2_ROUNDUP(num, align) \
+       (((num) + (align) - 1) & ~((align) - 1))
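+/*
+ * Illustrative example: ALIGN_POW2_ROUNDUP(20, 16) == 32. The mask
+ * arithmetic assumes 'align' is a power of two.
+ */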
+
+/*
+ * This struct avoids unnecessary allocation, or checks for allocation,
+ * of crypto params with the current allocate-on-the-fly implementation.
+ */
+
+struct crypto_params {
+       uint8_t *aad;
+       uint8_t *iv;
+       uint8_t *digest;
+};
+
+struct perf_test_params {
+
+       unsigned total_operations;
+       unsigned burst_size;
+       unsigned buf_size;
+
+       enum chain_mode chain;
+
+       enum rte_crypto_cipher_algorithm cipher_algo;
+       unsigned cipher_key_length;
+       enum rte_crypto_auth_algorithm auth_algo;
+
+       struct symmetric_session_attrs *session_attrs;
+
+       struct symmetric_op *symmetric_op;
+};
 
 #define MAX_NUM_OF_OPS_PER_UT  (128)
 
@@ -68,8 +139,7 @@ struct crypto_unittest_params {
 
        struct rte_cryptodev_sym_session *sess;
 
-       struct rte_crypto_sym_op *op;
-       struct rte_mbuf_offload *ol;
+       struct rte_crypto_op *op;
 
        struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
        struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -77,9 +147,125 @@ struct crypto_unittest_params {
        uint8_t *digest;
 };
 
+static struct rte_cryptodev_sym_session *
+test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
+               enum rte_crypto_cipher_algorithm cipher_algo,
+               unsigned int cipher_key_len,
+               enum rte_crypto_auth_algorithm auth_algo);
+static struct rte_cryptodev_sym_session *
+test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
+               enum rte_crypto_cipher_algorithm cipher_algo,
+               unsigned int cipher_key_len,
+               enum rte_crypto_auth_algorithm auth_algo);
+static struct rte_cryptodev_sym_session *
+test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
+               enum rte_crypto_cipher_algorithm cipher_algo,
+               unsigned int cipher_key_len,
+               enum rte_crypto_auth_algorithm auth_algo);
+
+static struct rte_mbuf *
+test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz);
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned data_len,
+               unsigned digest_len);
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned int data_len,
+               unsigned int digest_len, enum chain_mode chain);
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned int data_len,
+               unsigned int digest_len, enum chain_mode chain __rte_unused);
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned int data_len,
+               unsigned int digest_len, enum chain_mode chain __rte_unused);
+static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo);
+
+
+static const char *chain_mode_name(enum chain_mode mode)
+{
+       switch (mode) {
+       case CIPHER_HASH: return "cipher_hash";
+       case HASH_CIPHER: return "hash_cipher";
+       case CIPHER_ONLY: return "cipher_only";
+       case HASH_ONLY: return "hash_only";
+       default: return "";
+       }
+}
+
+static const char *pmd_name(enum rte_cryptodev_type pmd)
+{
+       switch (pmd) {
+       case RTE_CRYPTODEV_NULL_PMD:
+               return RTE_STR(CRYPTODEV_NAME_NULL_PMD);
+       case RTE_CRYPTODEV_AESNI_GCM_PMD:
+               return RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD);
+       case RTE_CRYPTODEV_AESNI_MB_PMD:
+               return RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD);
+       case RTE_CRYPTODEV_QAT_SYM_PMD:
+               return RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+       case RTE_CRYPTODEV_SNOW3G_PMD:
+               return RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD);
+       case RTE_CRYPTODEV_OPENSSL_PMD:
+               return RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD);
+       case RTE_CRYPTODEV_ARMV8_PMD:
+               return RTE_STR(CRYPTODEV_NAME_ARMV8_PMD);
+       default:
+               return "";
+       }
+}
+
+static const char *cipher_algo_name(enum rte_crypto_cipher_algorithm cipher_algo)
+{
+       switch (cipher_algo) {
+       case RTE_CRYPTO_CIPHER_NULL: return "NULL";
+       case RTE_CRYPTO_CIPHER_3DES_CBC: return "3DES_CBC";
+       case RTE_CRYPTO_CIPHER_3DES_CTR: return "3DES_CTR";
+       case RTE_CRYPTO_CIPHER_3DES_ECB: return "3DES_ECB";
+       case RTE_CRYPTO_CIPHER_AES_CBC: return "AES_CBC";
+       case RTE_CRYPTO_CIPHER_AES_CCM: return "AES_CCM";
+       case RTE_CRYPTO_CIPHER_AES_CTR: return "AES_CTR";
+       case RTE_CRYPTO_CIPHER_AES_ECB: return "AES_ECB";
+       case RTE_CRYPTO_CIPHER_AES_F8: return "AES_F8";
+       case RTE_CRYPTO_CIPHER_AES_GCM: return "AES_GCM";
+       case RTE_CRYPTO_CIPHER_AES_XTS: return "AES_XTS";
+       case RTE_CRYPTO_CIPHER_ARC4: return "ARC4";
+       case RTE_CRYPTO_CIPHER_KASUMI_F8: return "KASUMI_F8";
+       case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: return "SNOW3G_UEA2";
+       case RTE_CRYPTO_CIPHER_ZUC_EEA3: return "ZUC_EEA3";
+       default: return "Unknown cipher algo";
+       }
+}
+
+static const char *auth_algo_name(enum rte_crypto_auth_algorithm auth_algo)
+{
+       switch (auth_algo) {
+       case RTE_CRYPTO_AUTH_NULL: return "NULL";
+       case RTE_CRYPTO_AUTH_AES_CBC_MAC: return "AES_CBC_MAC";
+       case RTE_CRYPTO_AUTH_AES_CCM: return "AES_CCM";
+       case RTE_CRYPTO_AUTH_AES_CMAC: return "AES_CMAC";
+       case RTE_CRYPTO_AUTH_AES_GCM: return "AES_GCM";
+       case RTE_CRYPTO_AUTH_AES_GMAC: return "AES_GMAC";
+       case RTE_CRYPTO_AUTH_AES_XCBC_MAC: return "AES_XCBC_MAC";
+       case RTE_CRYPTO_AUTH_KASUMI_F9: return "KASUMI_F9";
+       case RTE_CRYPTO_AUTH_MD5: return "MD5";
+       case RTE_CRYPTO_AUTH_MD5_HMAC: return "MD5_HMAC";
+       case RTE_CRYPTO_AUTH_SHA1: return "SHA1";
+       case RTE_CRYPTO_AUTH_SHA1_HMAC: return "SHA1_HMAC";
+       case RTE_CRYPTO_AUTH_SHA224: return "SHA224";
+       case RTE_CRYPTO_AUTH_SHA224_HMAC: return "SHA224_HMAC";
+       case RTE_CRYPTO_AUTH_SHA256: return "SHA256";
+       case RTE_CRYPTO_AUTH_SHA256_HMAC: return "SHA256_HMAC";
+       case RTE_CRYPTO_AUTH_SHA384: return "SHA384";
+       case RTE_CRYPTO_AUTH_SHA384_HMAC: return "SHA384_HMAC";
+       case RTE_CRYPTO_AUTH_SHA512: return "SHA512";
+       case RTE_CRYPTO_AUTH_SHA512_HMAC: return "SHA512_HMAC";
+       case RTE_CRYPTO_AUTH_SNOW3G_UIA2: return "SNOW3G_UIA2";
+       case RTE_CRYPTO_AUTH_ZUC_EIA3: return "ZUC_EIA3";
+       default: return "Unknown auth algo";
+       }
+}
+
 static struct rte_mbuf *
 setup_test_string(struct rte_mempool *mpool,
-               const char *string, size_t len, uint8_t blocksize)
+               const uint8_t *data, size_t len, uint8_t blocksize)
 {
        struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
        size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,14 +278,14 @@ setup_test_string(struct rte_mempool *mpool,
                        return NULL;
                }
 
-               rte_memcpy(dst, string, t_len);
+               rte_memcpy(dst, (const void *)data, t_len);
        }
        return m;
 }
 
 static struct crypto_testsuite_params testsuite_params = { NULL };
 static struct crypto_unittest_params unittest_params;
-static enum rte_cryptodev_type gbl_cryptodev_preftest_devtype;
+static enum rte_cryptodev_type gbl_cryptodev_perftest_devtype;
 
 static int
 testsuite_setup(void)
@@ -113,52 +299,150 @@ testsuite_setup(void)
        ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
        if (ts_params->mbuf_mp == NULL) {
                /* Not already created so create */
-               ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
-                       MBUF_SIZE, MBUF_CACHE_SIZE,
-                       sizeof(struct rte_pktmbuf_pool_private),
-                       rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
-                       rte_socket_id(), 0);
+               ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+                               "CRYPTO_PERF_MBUFPOOL",
+                               NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+                               rte_socket_id());
                if (ts_params->mbuf_mp == NULL) {
                        RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
                        return TEST_FAILED;
                }
        }
 
-       ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
-                               NUM_MBUFS, MBUF_CACHE_SIZE,
-                               DEFAULT_NUM_XFORMS *
-                               sizeof(struct rte_crypto_sym_xform),
-                               rte_socket_id());
-               if (ts_params->mbuf_ol_pool == NULL) {
+
+       ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+                       RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+                       NUM_MBUFS, MBUF_CACHE_SIZE,
+                       DEFAULT_NUM_XFORMS *
+                       sizeof(struct rte_crypto_sym_xform),
+                       rte_socket_id());
+               if (ts_params->op_mpool == NULL) {
                        RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
                        return TEST_FAILED;
                }
 
        /* Create 2 AESNI MB devices if required */
-       if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD) {
+       if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD) {
+#ifndef RTE_LIBRTE_PMD_AESNI_MB
+               RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
+                       " enabled in config file to run this testsuite.\n");
+               return TEST_FAILED;
+#endif
                nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
                if (nb_devs < 2) {
                        for (i = nb_devs; i < 2; i++) {
                                ret = rte_eal_vdev_init(
-                                       CRYPTODEV_NAME_AESNI_MB_PMD, NULL);
+                                       RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
+
+                               TEST_ASSERT(ret == 0,
+                                       "Failed to create instance %u of pmd : %s",
+                                       i, RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+                       }
+               }
+       }
+
+       /* Create 2 AESNI GCM devices if required */
+       if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_AESNI_GCM_PMD) {
+#ifndef RTE_LIBRTE_PMD_AESNI_GCM
+               RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM must be"
+                       " enabled in config file to run this testsuite.\n");
+               return TEST_FAILED;
+#endif
+               nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_GCM_PMD);
+               if (nb_devs < 2) {
+                       for (i = nb_devs; i < 2; i++) {
+                               ret = rte_eal_vdev_init(
+                                       RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL);
+
+                               TEST_ASSERT(ret == 0,
+                                       "Failed to create instance %u of pmd : %s",
+                                       i, RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
+                       }
+               }
+       }
+
+       /* Create 2 SNOW3G devices if required */
+       if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_SNOW3G_PMD) {
+#ifndef RTE_LIBRTE_PMD_SNOW3G
+               RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_SNOW3G must be"
+                       " enabled in config file to run this testsuite.\n");
+               return TEST_FAILED;
+#endif
+               nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
+               if (nb_devs < 2) {
+                       for (i = nb_devs; i < 2; i++) {
+                               ret = rte_eal_vdev_init(
+                                       RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL);
 
                                TEST_ASSERT(ret == 0,
                                        "Failed to create instance %u of pmd : %s",
-                                       i, CRYPTODEV_NAME_AESNI_MB_PMD);
+                                       i, RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
+                       }
+               }
+       }
+
+       /* Create 2 OPENSSL devices if required */
+       if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_OPENSSL_PMD) {
+#ifndef RTE_LIBRTE_PMD_OPENSSL
+               RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_OPENSSL must be"
+                       " enabled in config file to run this testsuite.\n");
+               return TEST_FAILED;
+#endif
+               nb_devs = rte_cryptodev_count_devtype(
+                               RTE_CRYPTODEV_OPENSSL_PMD);
+               if (nb_devs < 2) {
+                       for (i = nb_devs; i < 2; i++) {
+                               ret = rte_eal_vdev_init(
+                                       RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
+                                       NULL);
+
+                               TEST_ASSERT(ret == 0, "Failed to create "
+                                       "instance %u of pmd : %s", i,
+                                       RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+                       }
+               }
+       }
+
+       /* Create 2 ARMv8 devices if required */
+       if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_ARMV8_PMD) {
+#ifndef RTE_LIBRTE_PMD_ARMV8_CRYPTO
+               RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO must be"
+                       " enabled in config file to run this testsuite.\n");
+               return TEST_FAILED;
+#endif
+               nb_devs = rte_cryptodev_count_devtype(
+                               RTE_CRYPTODEV_ARMV8_PMD);
+               if (nb_devs < 2) {
+                       for (i = nb_devs; i < 2; i++) {
+                               ret = rte_eal_vdev_init(
+                                       RTE_STR(CRYPTODEV_NAME_ARMV8_PMD),
+                                       NULL);
+
+                               TEST_ASSERT(ret == 0, "Failed to create "
+                                       "instance %u of pmd : %s", i,
+                                       RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
                        }
                }
        }
 
+#ifndef RTE_LIBRTE_PMD_QAT
+       if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_QAT_SYM_PMD) {
+               RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_QAT must be enabled "
+                               "in config file to run this testsuite.\n");
+               return TEST_FAILED;
+       }
+#endif
+
        nb_devs = rte_cryptodev_count();
        if (nb_devs < 1) {
-               RTE_LOG(ERR, USER1, "No crypto devices found?");
+               RTE_LOG(ERR, USER1, "No crypto devices found?\n");
                return TEST_FAILED;
        }
 
        /* Search for the first valid */
        for (i = 0; i < nb_devs; i++) {
                rte_cryptodev_info_get(i, &info);
-               if (info.dev_type == gbl_cryptodev_preftest_devtype) {
+               if (info.dev_type == gbl_cryptodev_perftest_devtype) {
                        ts_params->dev_id = i;
                        valid_dev_id = 1;
                        break;
@@ -170,14 +454,12 @@ testsuite_setup(void)
 
        /*
         * Using Crypto Device Id 0 by default.
-        * Since we can't free and re-allocate queue memory always set the queues
-        * on this device up to max size first so enough memory is allocated for
-        * any later re-configures needed by other tests
+        * Set up all the qps on this device
         */
 
        rte_cryptodev_info_get(ts_params->dev_id, &info);
 
-       ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
+       ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
        ts_params->conf.socket_id = SOCKET_ID_ANY;
        ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
 
@@ -186,19 +468,6 @@ testsuite_setup(void)
                        "Failed to configure cryptodev %u",
                        ts_params->dev_id);
 
-
-       ts_params->qp_conf.nb_descriptors = MAX_NUM_OPS_INFLIGHT;
-
-       for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs ; qp_id++) {
-               TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
-                       ts_params->dev_id, qp_id,
-                       &ts_params->qp_conf,
-                       rte_cryptodev_socket_id(ts_params->dev_id)),
-                       "Failed to setup queue pair %u on cryptodev %u",
-                       qp_id, ts_params->dev_id);
-       }
-
-       /*Now reconfigure queues to size we actually want to use in this testsuite.*/
        ts_params->qp_conf.nb_descriptors = PERF_NUM_OPS_INFLIGHT;
        for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs ; qp_id++) {
 
@@ -215,11 +484,15 @@ testsuite_setup(void)
 static void
 testsuite_teardown(void)
 {
-       struct crypto_testsuite_params *ts_params = &testsuite_params;
+       struct crypto_testsuite_params *ts_params =
+                       &testsuite_params;
 
        if (ts_params->mbuf_mp != NULL)
                RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
-               rte_mempool_count(ts_params->mbuf_mp));
+               rte_mempool_avail_count(ts_params->mbuf_mp));
+       if (ts_params->op_mpool != NULL)
+               RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_OP POOL count %u\n",
+               rte_mempool_avail_count(ts_params->op_mpool));
 }
 
 static int
@@ -256,8 +529,8 @@ ut_teardown(void)
                                ut_params->sess);
 
        /* free crypto operation structure */
-       if (ut_params->ol)
-               rte_pktmbuf_offload_free(ut_params->ol);
+       if (ut_params->op)
+               rte_crypto_op_free(ut_params->op);
 
        for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
                if (ut_params->obuf[i])
@@ -268,7 +541,7 @@ ut_teardown(void)
 
        if (ts_params->mbuf_mp != NULL)
                RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
-                       rte_mempool_count(ts_params->mbuf_mp));
+                       rte_mempool_avail_count(ts_params->mbuf_mp));
 
        rte_cryptodev_stats_get(ts_params->dev_id, &stats);
 
@@ -364,12 +637,11 @@ const char plaintext_quote[] =
 #define CIPHER_KEY_LENGTH_AES_CBC      (16)
 #define CIPHER_IV_LENGTH_AES_CBC       (CIPHER_KEY_LENGTH_AES_CBC)
 
-
-static uint8_t aes_cbc_key[] = {
+static uint8_t aes_cbc_128_key[] = {
                0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
                0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA };
 
-static uint8_t aes_cbc_iv[] = {
+static uint8_t aes_cbc_128_iv[] = {
                0xf5, 0xd3, 0x89, 0x0f, 0x47, 0x00, 0xcb, 0x52,
                0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1 };
 
@@ -1694,15 +1966,15 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
                { AES_CBC_ciphertext_2048B, HMAC_SHA256_ciphertext_2048B_digest } }
 };
 
-
 static int
 test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 {
-       uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
-       struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+       uint32_t num_to_submit = 4096;
+       struct rte_crypto_op *c_ops[num_to_submit];
+       struct rte_crypto_op *proc_ops[num_to_submit];
        uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
        uint32_t burst_sent, burst_received;
-       uint32_t b, burst_size, num_sent, num_received;
+       uint32_t i, burst_size, num_sent, num_received;
        struct crypto_testsuite_params *ts_params = &testsuite_params;
        struct crypto_unittest_params *ut_params = &unittest_params;
        struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1718,7 +1990,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 
        ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
        ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
-       ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
+       ut_params->cipher_xform.cipher.key.data = aes_cbc_128_key;
        ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
 
 
@@ -1739,46 +2011,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
        TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
        /* Generate Crypto op data structure(s) */
-       for (b = 0; b < num_to_submit ; b++) {
-               tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
-                       (const char *)data_params[0].expected.ciphertext,
+       for (i = 0; i < num_to_submit ; i++) {
+               struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+                               data_params[0].expected.ciphertext,
                                data_params[0].length, 0);
-               TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+               TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
 
-               ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+               ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
                                DIGEST_BYTE_LENGTH_SHA256);
-               TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+               TEST_ASSERT_NOT_NULL(ut_params->digest,
+                               "no room to append digest");
 
                rte_memcpy(ut_params->digest, data_params[0].expected.digest,
                        DIGEST_BYTE_LENGTH_SHA256);
 
-               struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
-                       ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
-               TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
 
-               struct rte_crypto_sym_op *cop = &ol->op.crypto;
+               struct rte_crypto_op *op =
+                               rte_crypto_op_alloc(ts_params->op_mpool,
+                                               RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 
-               rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+               rte_crypto_op_attach_sym_session(op, ut_params->sess);
 
-               cop->digest.data = ut_params->digest;
-               cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+               op->sym->auth.digest.data = ut_params->digest;
+               op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
                                data_params[0].length);
-               cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+               op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+               op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+               op->sym->auth.data.length = data_params[0].length;
 
-               cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+
+               op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
                                CIPHER_IV_LENGTH_AES_CBC);
-               cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
-               cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+               op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+               op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-               rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+               rte_memcpy(op->sym->cipher.iv.data, aes_cbc_128_iv,
+                               CIPHER_IV_LENGTH_AES_CBC);
 
-               cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-               cop->data.to_cipher.length = data_params[0].length;
+               op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+               op->sym->cipher.data.length = data_params[0].length;
 
-               cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-               cop->data.to_hash.length = data_params[0].length;
+               op->sym->m_src = m;
 
-               rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+               c_ops[i] = op;
        }
 
        printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +2065,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
        printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
        printf("\tRetries (Device Busy)\tAverage IA cycle cost "
                        "(assuming 0 retries)");
-       for (b = 2; b <= 128 ; b *= 2) {
+       for (i = 2; i <= 128 ; i *= 2) {
                num_sent = 0;
                num_received = 0;
                retries = 0;
                failed_polls = 0;
-               burst_size = b;
+               burst_size = i;
                total_cycles = 0;
                while (num_sent < num_to_submit) {
                        start_cycles = rte_rdtsc_precise();
-                       burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
-                                       &tx_mbufs[num_sent],
+                       burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+                                       0, &c_ops[num_sent],
                                        ((num_to_submit-num_sent) < burst_size) ?
                                        num_to_submit-num_sent : burst_size);
                        if (burst_sent == 0)
@@ -1814,9 +2090,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
                        rte_delay_ms(1);
 
                        start_cycles = rte_rdtsc_precise();
-                       burst_received =
-                               rte_cryptodev_dequeue_burst(dev_num,
-                                               0, rx_mbufs, burst_size);
+                       burst_received = rte_cryptodev_dequeue_burst(
+                                       dev_num, 0, proc_ops, burst_size);
                        if (burst_received == 0)
                                failed_polls++;
                        else
@@ -1826,14 +2101,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
                }
 
                while (num_received != num_to_submit) {
-                       if (gbl_cryptodev_preftest_devtype ==
+                       if (gbl_cryptodev_perftest_devtype ==
                                        RTE_CRYPTODEV_AESNI_MB_PMD)
                                rte_cryptodev_enqueue_burst(dev_num, 0,
                                                NULL, 0);
 
-                       burst_received =
-                               rte_cryptodev_dequeue_burst(dev_num,
-                                               0, rx_mbufs, burst_size);
+                       burst_received = rte_cryptodev_dequeue_burst(
+                                       dev_num, 0, proc_ops, burst_size);
                        if (burst_received == 0)
                                failed_polls++;
                        else
@@ -1847,233 +2121,2677 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
        }
        printf("\n");
 
-       for (b = 0; b < max_outstanding_reqs ; b++) {
-               struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
-               if (ol) {
-                       do {
-                               rte_pktmbuf_offload_free(ol);
-                               ol = ol->next;
-                       } while (ol != NULL);
-               }
-               rte_pktmbuf_free(tx_mbufs[b]);
+       for (i = 0; i < num_to_submit ; i++) {
+               rte_pktmbuf_free(c_ops[i]->sym->m_src);
+               rte_crypto_op_free(c_ops[i]);
        }
        return TEST_SUCCESS;
 }
 
 static int
-test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
+test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
 {
-       uint16_t index;
-       uint32_t burst_sent, burst_received;
-       uint32_t b, num_sent, num_received;
-       uint64_t failed_polls, retries, start_cycles, end_cycles;
-       const uint64_t mhz = rte_get_tsc_hz()/1000000;
-       double throughput, mmps;
-       struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+       uint32_t num_to_submit = pparams->total_operations;
+       struct rte_crypto_op *c_ops[num_to_submit];
+       struct rte_crypto_op *proc_ops[num_to_submit];
+       uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
+       uint32_t burst_sent = 0, burst_received = 0;
+       uint32_t i, burst_size, num_sent, num_ops_received;
        struct crypto_testsuite_params *ts_params = &testsuite_params;
-       struct crypto_unittest_params *ut_params = &unittest_params;
-       struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
+       static struct rte_cryptodev_sym_session *sess;
 
        if (rte_cryptodev_count() == 0) {
-               printf("\nNo crypto devices available. Is kernel driver loaded?\n");
+               printf("\nNo crypto devices found. Is PMD build configured?\n");
+               printf("\nAnd is kernel driver loaded for HW PMDs?\n");
                return TEST_FAILED;
        }
 
-       /* Setup Cipher Parameters */
-       ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-       ut_params->cipher_xform.next = &ut_params->auth_xform;
+       /* Create Crypto session*/
+       sess = test_perf_create_snow3g_session(ts_params->dev_id,
+                       pparams->chain, pparams->cipher_algo,
+                       pparams->cipher_key_length, pparams->auth_algo);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       /* Generate Crypto op data structure(s)*/
+       for (i = 0; i < num_to_submit ; i++) {
+               struct rte_mbuf *m = test_perf_create_pktmbuf(
+                                               ts_params->mbuf_mp,
+                                               pparams->buf_size);
+               TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
+
+               struct rte_crypto_op *op =
+                               rte_crypto_op_alloc(ts_params->op_mpool,
+                                               RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+               TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
+
+               op = test_perf_set_crypto_op_snow3g(op, m, sess, pparams->buf_size,
+                                       get_auth_digest_length(pparams->auth_algo));
+               TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
+
+               c_ops[i] = op;
+       }
 
-       ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
-       ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-       ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
-       ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
+       printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, auth_algo:%s, "
+                       "Packet Size %u bytes",
+                       pmd_name(gbl_cryptodev_perftest_devtype),
+                       ts_params->dev_id, 0,
+                       chain_mode_name(pparams->chain),
+                       cipher_algo_name(pparams->cipher_algo),
+                       auth_algo_name(pparams->auth_algo),
+                       pparams->buf_size);
+       printf("\nOps Tx\tOps Rx\tOps/burst  ");
+       printf("Retries  EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
+
+       for (i = 2; i <= 128 ; i *= 2) {
+               num_sent = 0;
+               num_ops_received = 0;
+               retries = 0;
+               failed_polls = 0;
+               burst_size = i;
+               total_cycles = 0;
+               while (num_sent < num_to_submit) {
+                       start_cycles = rte_rdtsc_precise();
+                       burst_sent = rte_cryptodev_enqueue_burst(ts_params->dev_id,
+                                       0, &c_ops[num_sent],
+                                       ((num_to_submit-num_sent) < burst_size) ?
+                                       num_to_submit-num_sent : burst_size);
+                       end_cycles = rte_rdtsc_precise();
+                       if (burst_sent == 0)
+                               retries++;
+                       num_sent += burst_sent;
+                       total_cycles += (end_cycles - start_cycles);
 
-       /* Setup HMAC Parameters */
-       ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
-       ut_params->auth_xform.next = NULL;
+                       /* Wait until requests have been sent. */
 
-       ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
-       ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
-       ut_params->auth_xform.auth.key.data = hmac_sha256_key;
-       ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA256;
-       ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
+                       rte_delay_ms(1);
 
-       /* Create Crypto session*/
-       ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
-                       &ut_params->cipher_xform);
+                       start_cycles = rte_rdtsc_precise();
+                       burst_received = rte_cryptodev_dequeue_burst(
+                                       ts_params->dev_id, 0, proc_ops, burst_size);
+                       end_cycles = rte_rdtsc_precise();
+                       if (burst_received < burst_sent)
+                               failed_polls++;
+                       num_ops_received += burst_received;
 
-       TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+                       total_cycles += end_cycles - start_cycles;
+               }
 
-       printf("\nThroughput test which will continually attempt to send "
-                       "AES128_CBC_SHA256_HMAC requests with a constant burst "
-                       "size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
-       printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
-                       "Mrps\tThoughput(Gbps)");
-       printf("\tRetries (Attempted a burst, but the device was busy)");
-       for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
-               num_sent = 0;
-               num_received = 0;
-               retries = 0;
-               failed_polls = 0;
+               while (num_ops_received != num_to_submit) {
+                       if (gbl_cryptodev_perftest_devtype ==
+                                       RTE_CRYPTODEV_AESNI_MB_PMD)
+                               rte_cryptodev_enqueue_burst(ts_params->dev_id, 0,
+                                               NULL, 0);
+                       start_cycles = rte_rdtsc_precise();
+                       burst_received = rte_cryptodev_dequeue_burst(
+                                       ts_params->dev_id, 0, proc_ops, burst_size);
+                       end_cycles = rte_rdtsc_precise();
+                       total_cycles += end_cycles - start_cycles;
+                       if (burst_received == 0)
+                               failed_polls++;
+                       num_ops_received += burst_received;
+               }
+
+               printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
+               printf("\t\t%"PRIu64, retries);
+               printf("\t%"PRIu64, failed_polls);
+               printf("\t\t%"PRIu64, total_cycles/num_ops_received);
+               printf("\t\t%"PRIu64, (total_cycles/num_ops_received)*burst_size);
+               printf("\t\t%"PRIu64, total_cycles/(num_ops_received*pparams->buf_size));
+       }
+       printf("\n");
 
-               /* Generate Crypto op data structure(s) */
-               for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
-                       tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
-                                       data_params[index].plaintext,
-                                       data_params[index].length,
-                                       0);
+       for (i = 0; i < num_to_submit ; i++) {
+               rte_pktmbuf_free(c_ops[i]->sym->m_src);
+               rte_crypto_op_free(c_ops[i]);
+       }
+       rte_cryptodev_sym_session_free(ts_params->dev_id, sess);
 
-                       ut_params->digest = (uint8_t *)rte_pktmbuf_append(
-                               tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
-                       TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+       return TEST_SUCCESS;
+}
 
-                       rte_memcpy(ut_params->digest, data_params[index].expected.digest,
-                       DIGEST_BYTE_LENGTH_SHA256);
+static int
+test_perf_snow3G_vary_burst_size(void)
+{
+       unsigned total_operations = 4096;
+       /* No need to vary pkt size for QAT; it should have no effect on IA cycles */
+       uint16_t buf_lengths[] = {40};
+       uint8_t i, j;
+
+       struct perf_test_params params_set[] = {
+                       {
+                                       .chain = CIPHER_ONLY,
+                                       .cipher_algo  = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+                                       .cipher_key_length = 16,
+                                       .auth_algo  = RTE_CRYPTO_AUTH_NULL,
+                       },
+                       {
+                                       .chain = HASH_ONLY,
+                                       .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+                                       .auth_algo  = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+                                       .cipher_key_length = 16
+                       },
+       };
+
+       printf("\n\nStart %s.", __func__);
+       printf("\nThis Test measures the average IA cycle cost using a "
+                       "constant request(packet) size. ");
+       printf("Cycle cost is only valid when indicators show device is not busy,"
+                       " i.e. Retries and EmptyPolls = 0");
+
+       for (i = 0; i < RTE_DIM(params_set); i++) {
+               printf("\n");
+               params_set[i].total_operations = total_operations;
+
+               for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+
+                       params_set[i].buf_size = buf_lengths[j];
+
+                       test_perf_snow3G_optimise_cyclecount(&params_set[i]);
+               }
 
-                       struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
-                                               ts_params->mbuf_ol_pool,
-                                               RTE_PKTMBUF_OL_CRYPTO_SYM);
-                       TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+       }
 
-                       struct rte_crypto_sym_op *cop = &ol->op.crypto;
+       return 0;
+}
 
-                       rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+static int
+test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
+{
+       uint32_t num_to_submit = pparams->total_operations;
+       struct rte_crypto_op *c_ops[num_to_submit];
+       struct rte_crypto_op *proc_ops[num_to_submit];
+       uint64_t failed_polls, retries, start_cycles,
+               end_cycles, total_cycles = 0;
+       uint32_t burst_sent = 0, burst_received = 0;
+       uint32_t i, burst_size, num_sent, num_ops_received;
 
-                       cop->digest.data = ut_params->digest;
-                       cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
-                               tx_mbufs[b], data_params[index].length);
-                       cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+       struct crypto_testsuite_params *ts_params = &testsuite_params;
 
-                       cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
-                                       CIPHER_IV_LENGTH_AES_CBC);
-                       cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
-                       cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+       static struct rte_cryptodev_sym_session *sess;
 
-                       rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+       static struct rte_crypto_op *(*test_perf_set_crypto_op)
+                       (struct rte_crypto_op *, struct rte_mbuf *,
+                                       struct rte_cryptodev_sym_session *,
+                                       unsigned int, unsigned int,
+                                       enum chain_mode);
 
-                       cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-                       cop->data.to_cipher.length = data_params[index].length;
+       unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
 
-                       cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-                       cop->data.to_hash.length = data_params[index].length;
+       if (rte_cryptodev_count() == 0) {
+               printf("\nNo crypto devices found. Is PMD build configured?\n");
+               return TEST_FAILED;
+       }
 
-                       rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+       /* Create Crypto session*/
+       sess = test_perf_create_openssl_session(ts_params->dev_id,
+                       pparams->chain, pparams->cipher_algo,
+                       pparams->cipher_key_length, pparams->auth_algo);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       /* Generate Crypto op data structure(s)*/
+       for (i = 0; i < num_to_submit ; i++) {
+               struct rte_mbuf *m = test_perf_create_pktmbuf(
+                                               ts_params->mbuf_mp,
+                                               pparams->buf_size);
+               TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
+
+               struct rte_crypto_op *op =
+                               rte_crypto_op_alloc(ts_params->op_mpool,
+                                               RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+               TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
+
+               switch (pparams->cipher_algo) {
+               case RTE_CRYPTO_CIPHER_3DES_CBC:
+               case RTE_CRYPTO_CIPHER_3DES_CTR:
+                       test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
+                       break;
+               case RTE_CRYPTO_CIPHER_AES_CBC:
+               case RTE_CRYPTO_CIPHER_AES_CTR:
+                       test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
+                       break;
+               case RTE_CRYPTO_CIPHER_AES_GCM:
+                       test_perf_set_crypto_op =
+                                               test_perf_set_crypto_op_aes_gcm;
+                       break;
+               default:
+                       return TEST_FAILED;
                }
-               start_cycles = rte_rdtsc_precise();
-               while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
-                       burst_sent = rte_cryptodev_enqueue_burst(dev_num,
-                                       0, tx_mbufs,
-                                       ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
-                                                       < DEFAULT_BURST_SIZE) ?
-                                       DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
-                                                       DEFAULT_BURST_SIZE);
+
+               op = test_perf_set_crypto_op(op, m, sess, pparams->buf_size,
+                               digest_length, pparams->chain);
+               TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
+
+               c_ops[i] = op;
+       }
+
+       printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, cipher key length:%u, "
+                       "auth_algo:%s, Packet Size %u bytes",
+                       pmd_name(gbl_cryptodev_perftest_devtype),
+                       ts_params->dev_id, 0,
+                       chain_mode_name(pparams->chain),
+                       cipher_algo_name(pparams->cipher_algo),
+                       pparams->cipher_key_length,
+                       auth_algo_name(pparams->auth_algo),
+                       pparams->buf_size);
+       printf("\nOps Tx\tOps Rx\tOps/burst  ");
+       printf("Retries  EmptyPolls\tIACycles/CyOp\tIACycles/Burst\t"
+                       "IACycles/Byte");
+
+       for (i = 2; i <= 128 ; i *= 2) {
+               num_sent = 0;
+               num_ops_received = 0;
+               retries = 0;
+               failed_polls = 0;
+               burst_size = i;
+               total_cycles = 0;
+               while (num_sent < num_to_submit) {
+                       start_cycles = rte_rdtsc_precise();
+                       burst_sent = rte_cryptodev_enqueue_burst(
+                                       ts_params->dev_id,
+                                       0, &c_ops[num_sent],
+                                       ((num_to_submit - num_sent) <
+                                               burst_size) ?
+                                       num_to_submit - num_sent : burst_size);
+                       end_cycles = rte_rdtsc_precise();
                        if (burst_sent == 0)
                                retries++;
-                       else
-                               num_sent += burst_sent;
+                       num_sent += burst_sent;
+                       total_cycles += (end_cycles - start_cycles);
 
-                       burst_received =
-                               rte_cryptodev_dequeue_burst(dev_num,
-                                       0, rx_mbufs, DEFAULT_BURST_SIZE);
-                       if (burst_received == 0)
+                       /* Wait until requests have been sent. */
+                       rte_delay_ms(1);
+
+                       start_cycles = rte_rdtsc_precise();
+                       burst_received = rte_cryptodev_dequeue_burst(
+                                       ts_params->dev_id, 0, proc_ops,
+                                       burst_size);
+                       end_cycles = rte_rdtsc_precise();
+                       if (burst_received < burst_sent)
                                failed_polls++;
-                       else
-                               num_received += burst_received;
+                       num_ops_received += burst_received;
+
+                       total_cycles += end_cycles - start_cycles;
                }
-               while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
-                       if (gbl_cryptodev_preftest_devtype ==
-                                       RTE_CRYPTODEV_AESNI_MB_PMD)
-                               rte_cryptodev_enqueue_burst(dev_num, 0,
-                                               NULL, 0);
 
-                       burst_received =
-                               rte_cryptodev_dequeue_burst(dev_num, 0,
-                                               rx_mbufs, DEFAULT_BURST_SIZE);
+               while (num_ops_received != num_to_submit) {
+                       /* Sending 0 length burst to flush sw crypto device */
+                       rte_cryptodev_enqueue_burst(ts_params->dev_id, 0,
+                                       NULL, 0);
+
+                       start_cycles = rte_rdtsc_precise();
+                       burst_received = rte_cryptodev_dequeue_burst(
+                                       ts_params->dev_id, 0, proc_ops,
+                                       burst_size);
+                       end_cycles = rte_rdtsc_precise();
+
+                       total_cycles += end_cycles - start_cycles;
                        if (burst_received == 0)
                                failed_polls++;
-                       else
-                               num_received += burst_received;
+                       num_ops_received += burst_received;
                }
-               end_cycles = rte_rdtsc_precise();
-               mmps = ((double)num_received * mhz) /
-                               (end_cycles - start_cycles);
-               throughput = (mmps * data_params[index].length * 8) / 1000;
-
-               printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
-                               data_params[index].length,
-                               num_sent, num_received);
-               printf("\t%.2f\t%.2f", mmps, throughput);
+
+               printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
                printf("\t\t%"PRIu64, retries);
-               for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
-                       struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
-                       if (ol) {
-                               do {
-                                       rte_pktmbuf_offload_free(ol);
-                                       ol = ol->next;
-                               } while (ol != NULL);
-                       }
-                       rte_pktmbuf_free(tx_mbufs[b]);
-               }
+               printf("\t%"PRIu64, failed_polls);
+               printf("\t\t%"PRIu64, total_cycles/num_ops_received);
+               printf("\t\t%"PRIu64, (total_cycles/num_ops_received) *
+                               burst_size);
+               printf("\t\t%"PRIu64,
+                               total_cycles /
+                               (num_ops_received * pparams->buf_size));
        }
-
        printf("\n");
+
+       for (i = 0; i < num_to_submit ; i++) {
+               rte_pktmbuf_free(c_ops[i]->sym->m_src);
+               rte_crypto_op_free(c_ops[i]);
+       }
+       rte_cryptodev_sym_session_free(ts_params->dev_id, sess);
+
        return TEST_SUCCESS;
 }
 
 static int
-test_perf_encrypt_digest_vary_req_size(void)
+test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
 {
-       return test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(
-                       testsuite_params.dev_id);
-}
+       uint32_t num_to_submit = pparams->total_operations;
+       struct rte_crypto_op *c_ops[num_to_submit];
+       struct rte_crypto_op *proc_ops[num_to_submit];
+       uint64_t failed_polls, retries, start_cycles, end_cycles,
+                total_cycles = 0;
+       uint32_t burst_sent = 0, burst_received = 0;
+       uint32_t i, burst_size, num_sent, num_ops_received;
+       uint32_t nb_ops;
 
-static int
-test_perf_vary_burst_size(void)
-{
-       return test_perf_crypto_qp_vary_burst_size(testsuite_params.dev_id);
-}
+       struct crypto_testsuite_params *ts_params = &testsuite_params;
 
+       static struct rte_cryptodev_sym_session *sess;
 
-static struct unit_test_suite cryptodev_testsuite  = {
-       .suite_name = "Crypto Device Unit Test Suite",
-       .setup = testsuite_setup,
-       .teardown = testsuite_teardown,
-       .unit_test_cases = {
-               TEST_CASE_ST(ut_setup, ut_teardown,
-                               test_perf_encrypt_digest_vary_req_size),
-               TEST_CASE_ST(ut_setup, ut_teardown,
-                               test_perf_vary_burst_size),
-               TEST_CASES_END() /**< NULL terminate unit test array */
+       unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
+
+       if (rte_cryptodev_count() == 0) {
+               printf("\nNo crypto devices found. Is PMD build configured?\n");
+               return TEST_FAILED;
        }
-};
 
-static int
-perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
-{
-       gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
+       /* Create Crypto session*/
+       sess = test_perf_create_armv8_session(ts_params->dev_id,
+                       pparams->chain, pparams->cipher_algo,
+                       pparams->cipher_key_length, pparams->auth_algo);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       /* Generate Crypto op data structure(s)*/
+       for (i = 0; i < num_to_submit ; i++) {
+               struct rte_mbuf *m = test_perf_create_pktmbuf(
+                                               ts_params->mbuf_mp,
+                                               pparams->buf_size);
+               TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
+
+               struct rte_crypto_op *op =
+                               rte_crypto_op_alloc(ts_params->op_mpool,
+                                               RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+               TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
+
+               op = test_perf_set_crypto_op_aes(op, m, sess, pparams->buf_size,
+                               digest_length, pparams->chain);
+               TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
+
+               c_ops[i] = op;
+       }
 
-       return unit_test_suite_runner(&cryptodev_testsuite);
-}
+       printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, cipher key length:%u, "
+                       "auth_algo:%s, Packet Size %u bytes",
+                       pmd_name(gbl_cryptodev_perftest_devtype),
+                       ts_params->dev_id, 0,
+                       chain_mode_name(pparams->chain),
+                       cipher_algo_name(pparams->cipher_algo),
+                       pparams->cipher_key_length,
+                       auth_algo_name(pparams->auth_algo),
+                       pparams->buf_size);
+       printf("\nOps Tx\tOps Rx\tOps/burst  ");
+       printf("Retries  "
+               "EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
+
+       for (i = 2; i <= 128 ; i *= 2) {
+               num_sent = 0;
+               num_ops_received = 0;
+               retries = 0;
+               failed_polls = 0;
+               burst_size = i;
+               total_cycles = 0;
+               while (num_sent < num_to_submit) {
+                       if ((num_to_submit - num_sent) < burst_size)
+                               nb_ops = num_to_submit - num_sent;
+                       else
+                               nb_ops = burst_size;
 
-static int
-perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+                       start_cycles = rte_rdtsc();
+                       burst_sent = rte_cryptodev_enqueue_burst(
+                               ts_params->dev_id,
+                               0, &c_ops[num_sent],
+                               nb_ops);
+                       end_cycles = rte_rdtsc();
+
+                       if (burst_sent == 0)
+                               retries++;
+                       num_sent += burst_sent;
+                       total_cycles += (end_cycles - start_cycles);
+
+                       start_cycles = rte_rdtsc();
+                       burst_received = rte_cryptodev_dequeue_burst(
+                                       ts_params->dev_id, 0, proc_ops,
+                                       burst_size);
+                       end_cycles = rte_rdtsc();
+                       if (burst_received < burst_sent)
+                               failed_polls++;
+                       num_ops_received += burst_received;
+
+                       total_cycles += end_cycles - start_cycles;
+               }
+
+               while (num_ops_received != num_to_submit) {
+                       /* Sending 0 length burst to flush sw crypto device */
+                       rte_cryptodev_enqueue_burst(
+                                               ts_params->dev_id, 0, NULL, 0);
+
+                       start_cycles = rte_rdtsc();
+                       burst_received = rte_cryptodev_dequeue_burst(
+                               ts_params->dev_id, 0, proc_ops, burst_size);
+                       end_cycles = rte_rdtsc();
+
+                       total_cycles += end_cycles - start_cycles;
+                       if (burst_received == 0)
+                               failed_polls++;
+                       num_ops_received += burst_received;
+               }
+
+               printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
+               printf("\t\t%"PRIu64, retries);
+               printf("\t%"PRIu64, failed_polls);
+               printf("\t\t%"PRIu64, total_cycles/num_ops_received);
+               printf("\t\t%"PRIu64,
+                       (total_cycles/num_ops_received)*burst_size);
+               printf("\t\t%"PRIu64,
+                       total_cycles/(num_ops_received*pparams->buf_size));
+       }
+       printf("\n");
+
+       for (i = 0; i < num_to_submit; i++) {
+               rte_pktmbuf_free(c_ops[i]->sym->m_src);
+               rte_crypto_op_free(c_ops[i]);
+       }
+
+       return TEST_SUCCESS;
+}
+
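+/*
+ * Auth key length (in bytes) used for each algorithm; for the HMAC variants
+ * this is the block size of the underlying hash.
+ */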
+static uint32_t get_auth_key_max_length(enum rte_crypto_auth_algorithm algo)
+{
+       switch (algo) {
+       case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+               return 16;
+       case RTE_CRYPTO_AUTH_SHA1_HMAC:
+               return 64;
+       case RTE_CRYPTO_AUTH_SHA224_HMAC:
+               return 64;
+       case RTE_CRYPTO_AUTH_SHA256_HMAC:
+               return 64;
+       case RTE_CRYPTO_AUTH_SHA384_HMAC:
+               return 128;
+       case RTE_CRYPTO_AUTH_SHA512_HMAC:
+               return 128;
+       case RTE_CRYPTO_AUTH_AES_GCM:
+               return 0;
+       default:
+               return 0;
+       }
+}
+
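+/* Digest length (in bytes) generated for each auth algorithm. */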
+static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo)
+{
+       switch (algo) {
+       case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+               return 4;
+       case RTE_CRYPTO_AUTH_SHA1_HMAC:
+               return TRUNCATED_DIGEST_BYTE_LENGTH_SHA1;
+       case RTE_CRYPTO_AUTH_SHA224_HMAC:
+               return TRUNCATED_DIGEST_BYTE_LENGTH_SHA224;
+       case RTE_CRYPTO_AUTH_SHA256_HMAC:
+               return TRUNCATED_DIGEST_BYTE_LENGTH_SHA256;
+       case RTE_CRYPTO_AUTH_SHA384_HMAC:
+               return TRUNCATED_DIGEST_BYTE_LENGTH_SHA384;
+       case RTE_CRYPTO_AUTH_SHA512_HMAC:
+               return TRUNCATED_DIGEST_BYTE_LENGTH_SHA512;
+       case RTE_CRYPTO_AUTH_AES_GCM:
+               return DIGEST_BYTE_LENGTH_AES_GCM;
+       default:
+               return 0;
+       }
+}
+
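+/*
+ * Static key and IV material used by the perf tests; the output is never
+ * verified, so the actual values do not matter.
+ */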
+static uint8_t aes_key[] = {
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static uint8_t aes_iv[] = {
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static uint8_t triple_des_key[] = {
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static uint8_t triple_des_iv[] = {
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static uint8_t hmac_sha_key[] = {
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static uint8_t snow3g_cipher_key[] = {
+               0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00,
+               0x95, 0x2C, 0x49, 0x10, 0x48, 0x81, 0xFF, 0x48
+};
+
+static uint8_t snow3g_iv[] = {
+               0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00,
+               0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00
+};
+
+static uint8_t snow3g_hash_key[] = {
+               0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9,
+               0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E
+};
+
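+/*
+ * Create an AES cipher session, optionally chained with an HMAC auth xform,
+ * with the xform order determined by the requested chain mode.
+ */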
+static struct rte_cryptodev_sym_session *
+test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
+               enum rte_crypto_cipher_algorithm cipher_algo,
+               unsigned cipher_key_len,
+               enum rte_crypto_auth_algorithm auth_algo)
+{
+       struct rte_crypto_sym_xform cipher_xform = { 0 };
+       struct rte_crypto_sym_xform auth_xform = { 0 };
+
+
+       /* Setup Cipher Parameters */
+       cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+       cipher_xform.cipher.algo = cipher_algo;
+       cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+       cipher_xform.cipher.key.data = aes_key;
+       cipher_xform.cipher.key.length = cipher_key_len;
+       if (chain != CIPHER_ONLY) {
+               /* Setup HMAC Parameters */
+               auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+               auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+               auth_xform.auth.algo = auth_algo;
+               auth_xform.auth.key.data = hmac_sha_key;
+               auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
+               auth_xform.auth.digest_length =
+                                       get_auth_digest_length(auth_algo);
+       }
+       switch (chain) {
+       case CIPHER_HASH:
+               cipher_xform.next = &auth_xform;
+               auth_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+       case HASH_CIPHER:
+               auth_xform.next = &cipher_xform;
+               cipher_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+       case CIPHER_ONLY:
+               cipher_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+       default:
+               return NULL;
+       }
+}
+
+#define SNOW3G_CIPHER_IV_LENGTH 16
+
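+/*
+ * Create a SNOW 3G session (UEA2 cipher and/or UIA2 auth) for cipher-only,
+ * hash-only or chained operation.
+ */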
+static struct rte_cryptodev_sym_session *
+test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
+               enum rte_crypto_cipher_algorithm cipher_algo, unsigned cipher_key_len,
+               enum rte_crypto_auth_algorithm auth_algo)
+{
+       struct rte_crypto_sym_xform cipher_xform = {0};
+       struct rte_crypto_sym_xform auth_xform = {0};
+
+
+       /* Setup Cipher Parameters */
+       cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+       cipher_xform.cipher.algo = cipher_algo;
+       cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+       cipher_xform.cipher.key.data = snow3g_cipher_key;
+       cipher_xform.cipher.key.length = cipher_key_len;
+
+       /* Setup HMAC Parameters */
+       auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+       auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+       auth_xform.auth.algo = auth_algo;
+
+       auth_xform.auth.add_auth_data_length = SNOW3G_CIPHER_IV_LENGTH;
+       auth_xform.auth.key.data = snow3g_hash_key;
+       auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
+       auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+
+       switch (chain) {
+       case CIPHER_HASH:
+               cipher_xform.next = &auth_xform;
+               auth_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+       case HASH_CIPHER:
+               auth_xform.next = &cipher_xform;
+               cipher_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+       case CIPHER_ONLY:
+               cipher_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+       case HASH_ONLY:
+               auth_xform.next = NULL;
+               /* Create Crypto session */
+               return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+       default:
+               return NULL;
+       }
+}
+
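+/*
+ * Create a session for the OpenSSL PMD tests: 3DES/AES cipher chained with
+ * SHA1-HMAC or AES-GCM authentication.
+ */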
+static struct rte_cryptodev_sym_session *
+test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
+               enum rte_crypto_cipher_algorithm cipher_algo,
+               unsigned int cipher_key_len,
+               enum rte_crypto_auth_algorithm auth_algo)
+{
+       struct rte_crypto_sym_xform cipher_xform = { 0 };
+       struct rte_crypto_sym_xform auth_xform = { 0 };
+
+       /* Setup Cipher Parameters */
+       cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+       cipher_xform.cipher.algo = cipher_algo;
+       cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+       switch (cipher_algo) {
+       case RTE_CRYPTO_CIPHER_3DES_CBC:
+       case RTE_CRYPTO_CIPHER_3DES_CTR:
+               cipher_xform.cipher.key.data = triple_des_key;
+               break;
+       case RTE_CRYPTO_CIPHER_AES_CBC:
+       case RTE_CRYPTO_CIPHER_AES_CTR:
+       case RTE_CRYPTO_CIPHER_AES_GCM:
+               cipher_xform.cipher.key.data = aes_key;
+               break;
+       default:
+               return NULL;
+       }
+
+       cipher_xform.cipher.key.length = cipher_key_len;
+
+       /* Setup Auth Parameters */
+       auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+       auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+       auth_xform.auth.algo = auth_algo;
+
+       switch (auth_algo) {
+       case RTE_CRYPTO_AUTH_SHA1_HMAC:
+               auth_xform.auth.key.data = hmac_sha_key;
+               break;
+       case RTE_CRYPTO_AUTH_AES_GCM:
+               auth_xform.auth.key.data = NULL;
+               break;
+       default:
+               return NULL;
+       }
+
+       auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
+       auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+
+       switch (chain) {
+       case CIPHER_HASH:
+               cipher_xform.next = &auth_xform;
+               auth_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+       case HASH_CIPHER:
+               auth_xform.next = &cipher_xform;
+               cipher_xform.next = NULL;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+       default:
+               return NULL;
+       }
+}
+
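+/*
+ * Create a chained AES-CBC + HMAC session for the ARMv8 PMD tests:
+ * encrypt-then-hash for CIPHER_HASH, hash-then-decrypt for HASH_CIPHER.
+ */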
+static struct rte_cryptodev_sym_session *
+test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
+               enum rte_crypto_cipher_algorithm cipher_algo,
+               unsigned int cipher_key_len,
+               enum rte_crypto_auth_algorithm auth_algo)
+{
+       struct rte_crypto_sym_xform cipher_xform = { 0 };
+       struct rte_crypto_sym_xform auth_xform = { 0 };
+
+       /* Setup Cipher Parameters */
+       cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+       cipher_xform.cipher.algo = cipher_algo;
+
+       switch (cipher_algo) {
+       case RTE_CRYPTO_CIPHER_AES_CBC:
+               cipher_xform.cipher.key.data = aes_cbc_128_key;
+               break;
+       default:
+               return NULL;
+       }
+
+       cipher_xform.cipher.key.length = cipher_key_len;
+
+       /* Setup Auth Parameters */
+       auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+       auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+       auth_xform.auth.algo = auth_algo;
+
+       auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+
+       switch (chain) {
+       case CIPHER_HASH:
+               cipher_xform.next = &auth_xform;
+               auth_xform.next = NULL;
+               /* Encrypt and hash the result */
+               cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+       case HASH_CIPHER:
+               auth_xform.next = &cipher_xform;
+               cipher_xform.next = NULL;
+               /* Hash encrypted message and decrypt */
+               cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+               /* Create Crypto session*/
+               return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+       default:
+               return NULL;
+       }
+}
+
+#define AES_BLOCK_SIZE 16
+#define AES_CIPHER_IV_LENGTH 16
+
+#define TRIPLE_DES_BLOCK_SIZE 8
+#define TRIPLE_DES_CIPHER_IV_LENGTH 8
+
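+/* Allocate an mbuf and append buf_sz bytes of zeroed payload. */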
+static struct rte_mbuf *
+test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz)
+{
+       struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
+
+       if (m == NULL)
+               return NULL;
+
+       if (rte_pktmbuf_append(m, buf_sz) == NULL) {
+               rte_pktmbuf_free(m);
+               return NULL;
+       }
+
+       memset(rte_pktmbuf_mtod(m, uint8_t *), 0, buf_sz);
+
+       return m;
+}
+
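+/*
+ * Attach the session and fill in an AES op. The mbuf layout is: IV at the
+ * start, payload after it and, for chained modes, the digest appended after
+ * the payload.
+ */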
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned int data_len,
+               unsigned int digest_len, enum chain_mode chain)
+{
+       if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+               rte_crypto_op_free(op);
+               return NULL;
+       }
+
+       /* Authentication Parameters */
+       if (chain == CIPHER_ONLY) {
+               op->sym->auth.digest.data = NULL;
+               op->sym->auth.digest.phys_addr = 0;
+               op->sym->auth.digest.length = 0;
+               op->sym->auth.aad.data = NULL;
+               op->sym->auth.aad.length = 0;
+               op->sym->auth.data.offset = 0;
+               op->sym->auth.data.length = 0;
+       } else {
+               op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(m,
+                                uint8_t *, AES_CIPHER_IV_LENGTH + data_len);
+               op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+                               AES_CIPHER_IV_LENGTH + data_len);
+               op->sym->auth.digest.length = digest_len;
+               op->sym->auth.aad.data = aes_iv;
+               op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
+               op->sym->auth.data.offset = AES_CIPHER_IV_LENGTH;
+               op->sym->auth.data.length = data_len;
+       }
+
+
+       /* Cipher Parameters */
+       op->sym->cipher.iv.data = rte_pktmbuf_mtod(m, uint8_t *);
+       op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+       op->sym->cipher.iv.length = AES_CIPHER_IV_LENGTH;
+
+       rte_memcpy(op->sym->cipher.iv.data, aes_iv, AES_CIPHER_IV_LENGTH);
+
+       op->sym->cipher.data.offset = AES_CIPHER_IV_LENGTH;
+       op->sym->cipher.data.length = data_len;
+
+       op->sym->m_src = m;
+
+       return op;
+}
+
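+/*
+ * Attach the session and fill in an AES-GCM op; the digest is placed
+ * directly after the payload and the first AES block of the buffer is
+ * excluded from the cipher/auth range.
+ */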
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned int data_len,
+               unsigned int digest_len, enum chain_mode chain __rte_unused)
+{
+       if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+               rte_crypto_op_free(op);
+               return NULL;
+       }
+
+       /* Authentication Parameters */
+       op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
+                                       (m->data_off + data_len);
+       op->sym->auth.digest.phys_addr =
+                               rte_pktmbuf_mtophys_offset(m, data_len);
+       op->sym->auth.digest.length = digest_len;
+       op->sym->auth.aad.data = aes_iv;
+       op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
+
+       /* Cipher Parameters */
+       op->sym->cipher.iv.data = aes_iv;
+       op->sym->cipher.iv.length = AES_CIPHER_IV_LENGTH;
+
+       /* Data lengths/offsets Parameters */
+       op->sym->auth.data.offset = AES_BLOCK_SIZE;
+       op->sym->auth.data.length = data_len - AES_BLOCK_SIZE;
+
+       op->sym->cipher.data.offset = AES_BLOCK_SIZE;
+       op->sym->cipher.data.length = data_len - AES_BLOCK_SIZE;
+
+       op->sym->m_src = m;
+
+       return op;
+}
+
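+/*
+ * Attach the session and fill in a chained SNOW 3G op. Cipher and auth
+ * lengths are expressed in bits, hence the << 3 conversions.
+ */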
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned data_len,
+               unsigned digest_len)
+{
+       if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+               rte_crypto_op_free(op);
+               return NULL;
+       }
+
+       /* Authentication Parameters */
+       op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
+                                               (m->data_off + data_len);
+       op->sym->auth.digest.phys_addr =
+                               rte_pktmbuf_mtophys_offset(m, data_len);
+       op->sym->auth.digest.length = digest_len;
+       op->sym->auth.aad.data = snow3g_iv;
+       op->sym->auth.aad.length = SNOW3G_CIPHER_IV_LENGTH;
+
+       /* Cipher Parameters */
+       op->sym->cipher.iv.data = snow3g_iv;
+       op->sym->cipher.iv.length = SNOW3G_CIPHER_IV_LENGTH;
+
+       /* Data lengths/offsets Parameters */
+       op->sym->auth.data.offset = 0;
+       op->sym->auth.data.length = data_len << 3;
+
+       op->sym->cipher.data.offset = 0;
+       op->sym->cipher.data.length = data_len << 3;
+
+       op->sym->m_src = m;
+
+       return op;
+}
+
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_snow3g_cipher(struct rte_crypto_op *op,
+               struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess,
+               unsigned data_len)
+{
+       if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+               rte_crypto_op_free(op);
+               return NULL;
+       }
+
+       /* Cipher Parameters */
+       op->sym->cipher.iv.data = rte_pktmbuf_mtod(m, uint8_t *);
+       op->sym->cipher.iv.length = SNOW3G_CIPHER_IV_LENGTH;
+       rte_memcpy(op->sym->cipher.iv.data, snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
+       op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+
+       op->sym->cipher.data.offset = SNOW3G_CIPHER_IV_LENGTH;
+       op->sym->cipher.data.length = data_len << 3;
+
+       op->sym->m_src = m;
+
+       return op;
+}
+
+
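+/*
+ * Hash-only SNOW 3G op: the IV/AAD sits at the start of the mbuf, the
+ * payload follows it and the digest is written after the payload.
+ */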
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
+               struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess,
+               unsigned data_len,
+               unsigned digest_len)
+{
+       if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+               rte_crypto_op_free(op);
+               return NULL;
+       }
+
+       /* Authentication Parameters */
+
+       op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
+                       data_len + SNOW3G_CIPHER_IV_LENGTH);
+       op->sym->auth.digest.phys_addr =
+                               rte_pktmbuf_mtophys_offset(m, data_len +
+                                       SNOW3G_CIPHER_IV_LENGTH);
+       op->sym->auth.digest.length = digest_len;
+       op->sym->auth.aad.data = rte_pktmbuf_mtod(m, uint8_t *);
+       op->sym->auth.aad.length = SNOW3G_CIPHER_IV_LENGTH;
+       rte_memcpy(op->sym->auth.aad.data, snow3g_iv,
+                       SNOW3G_CIPHER_IV_LENGTH);
+       op->sym->auth.aad.phys_addr = rte_pktmbuf_mtophys(m);
+
+       /* Data lengths/offsets Parameters */
+       op->sym->auth.data.offset = SNOW3G_CIPHER_IV_LENGTH;
+       op->sym->auth.data.length = data_len << 3;
+
+       op->sym->m_src = m;
+
+       return op;
+}
+
+
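+/*
+ * Attach the session and fill in a 3DES op; the digest is placed after the
+ * payload and the cipher skips the first 3DES block of the buffer.
+ */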
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess, unsigned int data_len,
+               unsigned int digest_len, enum chain_mode chain __rte_unused)
+{
+       if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+               rte_crypto_op_free(op);
+               return NULL;
+       }
+
+       /* Authentication Parameters */
+       op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
+                                       (m->data_off + data_len);
+       op->sym->auth.digest.phys_addr =
+                               rte_pktmbuf_mtophys_offset(m, data_len);
+       op->sym->auth.digest.length = digest_len;
+       op->sym->auth.aad.data = triple_des_iv;
+       op->sym->auth.aad.length = TRIPLE_DES_CIPHER_IV_LENGTH;
+
+       /* Cipher Parameters */
+       op->sym->cipher.iv.data = triple_des_iv;
+       op->sym->cipher.iv.length = TRIPLE_DES_CIPHER_IV_LENGTH;
+
+       /* Data lengths/offsets Parameters */
+       op->sym->auth.data.offset = 0;
+       op->sym->auth.data.length = data_len;
+
+       op->sym->cipher.data.offset = TRIPLE_DES_BLOCK_SIZE;
+       op->sym->cipher.data.length = data_len - TRIPLE_DES_BLOCK_SIZE;
+
+       op->sym->m_src = m;
+
+       return op;
+}
+
+/* An mbuf set is used for each burst. An mbuf can be used by multiple bursts
+ * at the same time; since the payloads are never dereferenced, there is no
+ * need to wait for a burst to finish before its mbufs are re-used.
+ */
+#define NUM_MBUF_SETS 8
+
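+/*
+ * Measure AES(+SHA) throughput: enqueue/dequeue total_operations ops in
+ * bursts, time the whole run and print Mops, Gbps, retries and empty polls.
+ */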
+static int
+test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
+               struct perf_test_params *pparams)
+{
+       uint16_t i, k, l, m;
+       uint16_t j = 0;
+       uint16_t ops_unused = 0;
+
+       uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+       uint64_t processed = 0, failed_polls = 0, retries = 0;
+       uint64_t tsc_start = 0, tsc_end = 0;
+
+       uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
+
+       struct rte_crypto_op *ops[pparams->burst_size];
+       struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+       struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+       struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+       static struct rte_cryptodev_sym_session *sess;
+
+       if (rte_cryptodev_count() == 0) {
+               printf("\nNo crypto devices available. Is kernel driver loaded?\n");
+               return TEST_FAILED;
+       }
+
+       /* Create Crypto session*/
+       sess = test_perf_create_aes_sha_session(ts_params->dev_id,
+                       pparams->chain, pparams->cipher_algo,
+                       pparams->cipher_key_length, pparams->auth_algo);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       /* Generate a burst of crypto operations */
+       for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+               mbufs[i] = test_perf_create_pktmbuf(
+                               ts_params->mbuf_mp,
+                               pparams->buf_size);
+
+               if (mbufs[i] == NULL) {
+                       printf("\nFailed to get mbuf - freeing the rest.\n");
+                       for (k = 0; k < i; k++)
+                               rte_pktmbuf_free(mbufs[k]);
+                       return -1;
+               }
+
+               /* Make room for Digest and IV in mbuf */
+               if (pparams->chain != CIPHER_ONLY)
+                       rte_pktmbuf_append(mbufs[i], digest_length);
+               rte_pktmbuf_prepend(mbufs[i], AES_CIPHER_IV_LENGTH);
+       }
+
+
+       tsc_start = rte_rdtsc_precise();
+
+       while (total_enqueued < pparams->total_operations) {
+               uint16_t burst_size =
+               total_enqueued+pparams->burst_size <= pparams->total_operations ?
+               pparams->burst_size : pparams->total_operations-total_enqueued;
+               uint16_t ops_needed = burst_size-ops_unused;
+
+               if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+                               RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+                       printf("\nFailed to alloc enough ops, finish dequeuing "
+                               "and free ops below.");
+               } else {
+                       for (i = 0; i < ops_needed; i++)
+                               ops[i] = test_perf_set_crypto_op_aes(ops[i],
+                                       mbufs[i + (pparams->burst_size *
+                                               (j % NUM_MBUF_SETS))],
+                                       sess, pparams->buf_size, digest_length,
+                                       pparams->chain);
+
+                       /* enqueue burst */
+                       burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+                                       queue_id, ops, burst_size);
+
+                       if (burst_enqueued < burst_size)
+                               retries++;
+
+                       ops_unused = burst_size-burst_enqueued;
+                       total_enqueued += burst_enqueued;
+               }
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (l = 0; l < burst_dequeued; l++)
+                               rte_crypto_op_free(proc_ops[l]);
+               }
+               j++;
+       }
+
+       /* Dequeue any operations still in the crypto device */
+       while (processed < pparams->total_operations) {
+               /* Sending 0 length burst to flush sw crypto device */
+               rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (m = 0; m < burst_dequeued; m++)
+                               rte_crypto_op_free(proc_ops[m]);
+               }
+       }
+
+       tsc_end = rte_rdtsc_precise();
+
+       double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
+       double throughput = (ops_s * pparams->buf_size * 8) / 1000000000;
+
+       printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size, ops_s/1000000,
+                       throughput, retries, failed_polls);
+
+       for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+               rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
+
+       printf("\n");
+       return TEST_SUCCESS;
+}
+
+
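+/*
+ * Measure SNOW 3G cipher-only or hash-only throughput; cycle costs per
+ * burst/buffer/byte are also reported for SW devices.
+ */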
+static int
+test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
+               struct perf_test_params *pparams)
+{
+       uint16_t i, k, l, m;
+       uint16_t j = 0;
+       uint16_t ops_unused = 0;
+       uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+       uint64_t processed = 0, failed_polls = 0, retries = 0;
+       uint64_t tsc_start = 0, tsc_end = 0;
+
+       uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
+
+       struct rte_crypto_op *ops[pparams->burst_size];
+       struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+       struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+       struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+       static struct rte_cryptodev_sym_session *sess;
+
+       if (rte_cryptodev_count() == 0) {
+               printf("\nNo crypto devices found. Is PMD build configured?\n");
+               printf("\nAnd is kernel driver loaded for HW PMDs?\n");
+               return TEST_FAILED;
+       }
+
+       /* Create Crypto session*/
+       sess = test_perf_create_snow3g_session(ts_params->dev_id,
+                       pparams->chain, pparams->cipher_algo,
+                       pparams->cipher_key_length, pparams->auth_algo);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       /* Generate a burst of crypto operations */
+       for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+               /*
+                * Allocate the buffer size plus the IV/AAD length (these are
+                * equal for the perf tests) plus the digest length.
+                */
+               mbufs[i] = test_perf_create_pktmbuf(
+                               ts_params->mbuf_mp,
+                               pparams->buf_size + SNOW3G_CIPHER_IV_LENGTH +
+                               digest_length);
+
+               if (mbufs[i] == NULL) {
+                       printf("\nFailed to get mbuf - freeing the rest.\n");
+                       for (k = 0; k < i; k++)
+                               rte_pktmbuf_free(mbufs[k]);
+                       return -1;
+               }
+
+       }
+
+       tsc_start = rte_rdtsc_precise();
+
+       while (total_enqueued < pparams->total_operations) {
+               uint16_t burst_size =
+                               (total_enqueued+pparams->burst_size)
+                                               <= pparams->total_operations ?
+               pparams->burst_size : pparams->total_operations-total_enqueued;
+               uint16_t ops_needed = burst_size-ops_unused;
+               /* Handle the last burst correctly */
+               uint16_t op_offset = pparams->burst_size - burst_size;
+
+               if (ops_needed !=
+                       rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+                                               RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+                                               ops+op_offset, ops_needed)) {
+                       printf("\nFailed to alloc enough ops.");
+                       /*
+                        * Don't exit; dequeue, more ops should become
+                        * available.
+                        */
+               } else {
+                       for (i = 0; i < ops_needed; i++) {
+                               if (pparams->chain == HASH_ONLY)
+                                       ops[i+op_offset] =
+                                       test_perf_set_crypto_op_snow3g_hash(ops[i+op_offset],
+                                       mbufs[i +
+                                         (pparams->burst_size * (j % NUM_MBUF_SETS))],
+                                       sess,
+                                       pparams->buf_size, digest_length);
+                               else if (pparams->chain == CIPHER_ONLY)
+                                       ops[i+op_offset] =
+                                       test_perf_set_crypto_op_snow3g_cipher(ops[i+op_offset],
+                                       mbufs[i +
+                                         (pparams->burst_size * (j % NUM_MBUF_SETS))],
+                                       sess,
+                                       pparams->buf_size);
+                               else
+                                       return 1;
+                       }
+
+                       /* enqueue burst */
+                       burst_enqueued =
+                               rte_cryptodev_enqueue_burst(dev_id, queue_id,
+                                               ops+op_offset, burst_size);
+
+                       if (burst_enqueued < burst_size)
+                               retries++;
+
+                       ops_unused = burst_size-burst_enqueued;
+                       total_enqueued += burst_enqueued;
+               }
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                                       proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0) {
+                       failed_polls++;
+               } else {
+                       processed += burst_dequeued;
+                       for (l = 0; l < burst_dequeued; l++)
+                               rte_crypto_op_free(proc_ops[l]);
+               }
+               j++;
+       }
+
+       /* Dequeue any operations still in the crypto device */
+       while (processed < pparams->total_operations) {
+               /* Sending 0 length burst to flush sw crypto device */
+               rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+                       for (m = 0; m < burst_dequeued; m++)
+                               rte_crypto_op_free(proc_ops[m]);
+               }
+       }
+
+       tsc_end = rte_rdtsc_precise();
+
+       double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
+       double cycles_burst = (double) (tsc_end - tsc_start) /
+                                       (double) processed * pparams->burst_size;
+       double cycles_buff = (double) (tsc_end - tsc_start) / (double) processed;
+       double cycles_B = cycles_buff / pparams->buf_size;
+       double throughput = (ops_s * pparams->buf_size * 8) / 1000000;
+
+       if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_QAT_SYM_PMD) {
+               /* Cycle count misleading on HW devices for this test, so don't print */
+               printf("%4u\t%6.2f\t%10.2f\t n/a \t\t n/a "
+                       "\t\t n/a \t\t%8"PRIu64"\t%8"PRIu64,
+                       pparams->buf_size, ops_s/1000000,
+                       throughput, retries, failed_polls);
+       } else {
+               printf("%4u\t%6.2f\t%10.2f\t%10.2f\t%8.2f"
+                       "\t%8.2f\t%8"PRIu64"\t%8"PRIu64,
+                       pparams->buf_size, ops_s/1000000, throughput, cycles_burst,
+                       cycles_buff, cycles_B, retries, failed_polls);
+       }
+
+       for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+               rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
+
+       printf("\n");
+       return TEST_SUCCESS;
+}
+
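+/*
+ * Measure OpenSSL PMD throughput; the op-setup routine is selected from the
+ * cipher algorithm (3DES, AES-CBC/CTR or AES-GCM).
+ */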
+static int
+test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
+               struct perf_test_params *pparams)
+{
+       uint16_t i, k, l, m;
+       uint16_t j = 0;
+       uint16_t ops_unused = 0;
+
+       uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+       uint64_t processed = 0, failed_polls = 0, retries = 0;
+       uint64_t tsc_start = 0, tsc_end = 0;
+
+       unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
+
+       struct rte_crypto_op *ops[pparams->burst_size];
+       struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+       struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+       struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+       static struct rte_cryptodev_sym_session *sess;
+
+       static struct rte_crypto_op *(*test_perf_set_crypto_op)
+                       (struct rte_crypto_op *, struct rte_mbuf *,
+                                       struct rte_cryptodev_sym_session *,
+                                       unsigned int, unsigned int,
+                                       enum chain_mode);
+
+       switch (pparams->cipher_algo) {
+       case RTE_CRYPTO_CIPHER_3DES_CBC:
+       case RTE_CRYPTO_CIPHER_3DES_CTR:
+               test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
+               break;
+       case RTE_CRYPTO_CIPHER_AES_CBC:
+       case RTE_CRYPTO_CIPHER_AES_CTR:
+               test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
+               break;
+       case RTE_CRYPTO_CIPHER_AES_GCM:
+               test_perf_set_crypto_op = test_perf_set_crypto_op_aes_gcm;
+               break;
+       default:
+               return TEST_FAILED;
+       }
+
+       if (rte_cryptodev_count() == 0) {
+               printf("\nNo crypto devices found. Is PMD build configured?\n");
+               return TEST_FAILED;
+       }
+
+       /* Create Crypto session*/
+       sess = test_perf_create_openssl_session(ts_params->dev_id,
+                       pparams->chain, pparams->cipher_algo,
+                       pparams->cipher_key_length, pparams->auth_algo);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       /* Generate a burst of crypto operations */
+       for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+               mbufs[i] = test_perf_create_pktmbuf(
+                               ts_params->mbuf_mp,
+                               pparams->buf_size);
+
+               if (mbufs[i] == NULL) {
+                       printf("\nFailed to get mbuf - freeing the rest.\n");
+                       for (k = 0; k < i; k++)
+                               rte_pktmbuf_free(mbufs[k]);
+                       return -1;
+               }
+       }
+
+       tsc_start = rte_rdtsc_precise();
+
+       while (total_enqueued < pparams->total_operations) {
+               uint16_t burst_size =
+               total_enqueued + pparams->burst_size <=
+               pparams->total_operations ? pparams->burst_size :
+                               pparams->total_operations - total_enqueued;
+               uint16_t ops_needed = burst_size - ops_unused;
+
+               if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+                               RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+                       printf("\nFailed to alloc enough ops, finish dequeuing "
+                               "and free ops below.");
+               } else {
+                       for (i = 0; i < ops_needed; i++)
+                               ops[i] = test_perf_set_crypto_op(ops[i],
+                                       mbufs[i + (pparams->burst_size *
+                                               (j % NUM_MBUF_SETS))],
+                                       sess, pparams->buf_size, digest_length,
+                                       pparams->chain);
+
+                       /* enqueue burst */
+                       burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+                                       queue_id, ops, burst_size);
+
+                       if (burst_enqueued < burst_size)
+                               retries++;
+
+                       ops_unused = burst_size - burst_enqueued;
+                       total_enqueued += burst_enqueued;
+               }
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (l = 0; l < burst_dequeued; l++)
+                               rte_crypto_op_free(proc_ops[l]);
+               }
+               j++;
+       }
+
+       /* Dequeue any operations still in the crypto device */
+       while (processed < pparams->total_operations) {
+               /* Sending 0 length burst to flush sw crypto device */
+               rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (m = 0; m < burst_dequeued; m++)
+                               rte_crypto_op_free(proc_ops[m]);
+               }
+       }
+
+       tsc_end = rte_rdtsc_precise();
+
+       double ops_s = ((double)processed / (tsc_end - tsc_start))
+                                       * rte_get_tsc_hz();
+       double throughput = (ops_s * pparams->buf_size * 8) / 1000000000;
+
+       printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size,
+                       ops_s / 1000000, throughput, retries, failed_polls);
+
+       for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+               rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
+
+       printf("\n");
+       return TEST_SUCCESS;
+}
+
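+/* Measure ARMv8 PMD throughput for chained AES-CBC + HMAC operations. */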
+static int
+test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
+               struct perf_test_params *pparams)
+{
+       uint16_t i, k, l, m;
+       uint16_t j = 0;
+       uint16_t ops_unused = 0;
+       uint16_t burst_size;
+       uint16_t ops_needed;
+
+       uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+       uint64_t processed = 0, failed_polls = 0, retries = 0;
+       uint64_t tsc_start = 0, tsc_end = 0;
+
+       unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
+
+       struct rte_crypto_op *ops[pparams->burst_size];
+       struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+       struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+       struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+       static struct rte_cryptodev_sym_session *sess;
+
+       if (rte_cryptodev_count() == 0) {
+               printf("\nNo crypto devices found. Is PMD build configured?\n");
+               return TEST_FAILED;
+       }
+
+       /* Create Crypto session*/
+       sess = test_perf_create_armv8_session(ts_params->dev_id,
+                       pparams->chain, pparams->cipher_algo,
+                       pparams->cipher_key_length, pparams->auth_algo);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       /* Generate a burst of crypto operations */
+       for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+               mbufs[i] = test_perf_create_pktmbuf(
+                               ts_params->mbuf_mp,
+                               pparams->buf_size);
+
+               if (mbufs[i] == NULL) {
+                       printf("\nFailed to get mbuf - freeing the rest.\n");
+                       for (k = 0; k < i; k++)
+                               rte_pktmbuf_free(mbufs[k]);
+                       return -1;
+               }
+       }
+
+       tsc_start = rte_rdtsc();
+
+       while (total_enqueued < pparams->total_operations) {
+               if ((total_enqueued + pparams->burst_size) <=
+                                       pparams->total_operations)
+                       burst_size = pparams->burst_size;
+               else
+                       burst_size = pparams->total_operations - total_enqueued;
+
+               ops_needed = burst_size - ops_unused;
+
+               if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+                               RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+                       printf("\nFailed to alloc enough ops, finish dequeuing "
+                               "and free ops below.");
+               } else {
+                       for (i = 0; i < ops_needed; i++)
+                               ops[i] = test_perf_set_crypto_op_aes(ops[i],
+                                       mbufs[i + (pparams->burst_size *
+                                               (j % NUM_MBUF_SETS))], sess,
+                                       pparams->buf_size, digest_length,
+                                       pparams->chain);
+
+                       /* enqueue burst */
+                       burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+                                       queue_id, ops, burst_size);
+
+                       if (burst_enqueued < burst_size)
+                               retries++;
+
+                       ops_unused = burst_size - burst_enqueued;
+                       total_enqueued += burst_enqueued;
+               }
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (l = 0; l < burst_dequeued; l++)
+                               rte_crypto_op_free(proc_ops[l]);
+               }
+               j++;
+       }
+
+       /* Dequeue any operations still in the crypto device */
+       while (processed < pparams->total_operations) {
+               /* Sending 0 length burst to flush sw crypto device */
+               rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (m = 0; m < burst_dequeued; m++)
+                               rte_crypto_op_free(proc_ops[m]);
+               }
+       }
+
+       tsc_end = rte_rdtsc();
+
+       double ops_s = ((double)processed / (tsc_end - tsc_start))
+                                       * rte_get_tsc_hz();
+       double throughput = (ops_s * pparams->buf_size * 8) / 1000000000;
+
+       printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size,
+                       ops_s / 1000000, throughput, retries, failed_polls);
+
+       for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+               rte_pktmbuf_free(mbufs[i]);
+
+       printf("\n");
+       return TEST_SUCCESS;
+}
+
+/*
+
+    perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA1);
+    perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_256);
+    perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_512);
+
+    perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA1);
+    perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_256);
+    perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_512);
+
+    perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA1);
+    perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_256);
+    perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_512);
+ */
+static int
+test_perf_aes_cbc_encrypt_digest_vary_pkt_size(void)
+{
+       unsigned total_operations = 1000000;
+       unsigned burst_size = 32;
+       unsigned buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536, 1792, 2048 };
+       uint8_t i, j;
+
+       struct perf_test_params params_set[] = {
+               {
+                       .chain = CIPHER_ONLY,
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_NULL
+               },
+               {
+                       .chain = CIPHER_HASH,
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 32,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 32,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 32,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
+               },
+       };
+
+       for (i = 0; i < RTE_DIM(params_set); i++) {
+
+               params_set[i].total_operations = total_operations;
+               params_set[i].burst_size = burst_size;
+               printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+                               " burst_size: %d ops\n",
+                               chain_mode_name(params_set[i].chain),
+                               cipher_algo_name(params_set[i].cipher_algo),
+                               auth_algo_name(params_set[i].auth_algo),
+                               params_set[i].cipher_key_length,
+                               burst_size);
+               printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
+                       "Retries\tEmptyPolls\n");
+               for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+                       params_set[i].buf_size = buf_lengths[j];
+                       test_perf_aes_sha(testsuite_params.dev_id, 0,
+                                       &params_set[i]);
+               }
+       }
+       return 0;
+}
+
+static int
+test_perf_snow3G_vary_pkt_size(void)
 {
-       gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+       unsigned total_operations = 1000000;
+       uint8_t i, j;
+       unsigned k;
+       uint16_t burst_sizes[] = { 64 };
+       uint16_t buf_lengths[] = { 40, 64, 80, 120, 240, 256, 400, 512, 600, 1024, 2048 };
+
+       struct perf_test_params params_set[] = {
+               {
+                       .chain = CIPHER_ONLY,
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+                       .cipher_key_length = 16,
+                       .auth_algo  = RTE_CRYPTO_AUTH_NULL,
+               },
+               {
+                       .chain = HASH_ONLY,
+                       .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+                       .auth_algo  = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+                       .cipher_key_length = 16
+               },
+       };
+
+       printf("\n\nStart %s.", __func__);
+       printf("\nTest to measure max throughput at various pkt sizes.");
+       printf("\nOn HW devices t'put maximised when high Retries and EmptyPolls"
+                       " so cycle cost not relevant (n/a displayed).");
+
+       for (i = 0; i < RTE_DIM(params_set); i++) {
+               printf("\n\n");
+               params_set[i].total_operations = total_operations;
+               for (k = 0; k < RTE_DIM(burst_sizes); k++) {
+                       printf("\nOn %s dev%u qp%u, %s, "
+                               "cipher algo:%s, auth algo:%s, burst_size: %d ops",
+                               pmd_name(gbl_cryptodev_perftest_devtype),
+                               testsuite_params.dev_id, 0,
+                               chain_mode_name(params_set[i].chain),
+                               cipher_algo_name(params_set[i].cipher_algo),
+                               auth_algo_name(params_set[i].auth_algo),
+                               burst_sizes[k]);
+
+                       params_set[i].burst_size = burst_sizes[k];
+                       printf("\nPktSzB\tOp/s(M)\tThruput(Mbps)\tCycles/Burst\t"
+                               "Cycles/buf\tCycles/B\tRetries\t\tEmptyPolls\n");
+                       for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+
+                               params_set[i].buf_size = buf_lengths[j];
+
+                               test_perf_snow3g(testsuite_params.dev_id, 0, &params_set[i]);
+                       }
+               }
+       }
 
-       return unit_test_suite_runner(&cryptodev_testsuite);
+       return 0;
+}
+
+static int
+test_perf_openssl_vary_pkt_size(void)
+{
+       unsigned int total_operations = 10000;
+       unsigned int burst_size = 64;
+       unsigned int buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536,
+                       1792, 2048 };
+       uint8_t i, j;
+
+       struct perf_test_params params_set[] = {
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+                       .cipher_key_length = 24,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+                       .cipher_key_length = 32,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+                       .cipher_key_length = 24,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_GCM,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_AES_GCM
+               },
+       };
+
+       for (i = 0; i < RTE_DIM(params_set); i++) {
+               params_set[i].total_operations = total_operations;
+               params_set[i].burst_size = burst_size;
+               printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+                               " burst_size: %d ops\n",
+                               chain_mode_name(params_set[i].chain),
+                               cipher_algo_name(params_set[i].cipher_algo),
+                               auth_algo_name(params_set[i].auth_algo),
+                               params_set[i].cipher_key_length,
+                               burst_size);
+               printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
+                               "EmptyPolls\n");
+               for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+                       params_set[i].buf_size = buf_lengths[j];
+                       test_perf_openssl(testsuite_params.dev_id, 0,
+                                       &params_set[i]);
+               }
+       }
+
+       return 0;
+}
+
+static int
+test_perf_openssl_vary_burst_size(void)
+{
+       unsigned int total_operations = 4096;
+       uint16_t buf_lengths[] = { 40 };
+       uint8_t i, j;
+
+       struct perf_test_params params_set[] = {
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+                       .cipher_key_length = 24,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+                       .cipher_key_length = 32,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+                       .cipher_key_length = 24,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_GCM,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_AES_GCM
+               },
+       };
+
+       printf("\n\nStart %s.", __func__);
+       printf("\nThis test measures the average IA cycle cost using a "
+                       "constant request (packet) size. ");
+       printf("Cycle cost is only valid when indicators show device is not"
+                       " busy, i.e. Retries and EmptyPolls = 0.");
+
+       for (i = 0; i < RTE_DIM(params_set); i++) {
+               printf("\n");
+               params_set[i].total_operations = total_operations;
+
+               for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+                       params_set[i].buf_size = buf_lengths[j];
+                       test_perf_openssl_optimise_cyclecount(&params_set[i]);
+               }
+       }
+
+       return 0;
+}
+
+static int
+test_perf_armv8_vary_pkt_size(void)
+{
+       unsigned int total_operations = 100000;
+       unsigned int burst_size = 64;
+       unsigned int buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536,
+                       1792, 2048 };
+       uint8_t i, j;
+
+       struct perf_test_params params_set[] = {
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = HASH_CIPHER,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+               },
+               {
+                       .chain = HASH_CIPHER,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+               },
+       };
+
+       for (i = 0; i < RTE_DIM(params_set); i++) {
+               params_set[i].total_operations = total_operations;
+               params_set[i].burst_size = burst_size;
+               printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+                               " burst_size: %d ops\n",
+                               chain_mode_name(params_set[i].chain),
+                               cipher_algo_name(params_set[i].cipher_algo),
+                               auth_algo_name(params_set[i].auth_algo),
+                               params_set[i].cipher_key_length,
+                               burst_size);
+               printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
+                               "EmptyPolls\n");
+               for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+                       params_set[i].buf_size = buf_lengths[j];
+                       test_perf_armv8(testsuite_params.dev_id, 0,
+                                                       &params_set[i]);
+               }
+       }
+
+       return 0;
+}
+
+static int
+test_perf_armv8_vary_burst_size(void)
+{
+       unsigned int total_operations = 4096;
+       uint16_t buf_lengths[] = { 64 };
+       uint8_t i, j;
+
+       struct perf_test_params params_set[] = {
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = HASH_CIPHER,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+               },
+               {
+                       .chain = CIPHER_HASH,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+               },
+               {
+                       .chain = HASH_CIPHER,
+
+                       .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+                       .cipher_key_length = 16,
+                       .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+               },
+       };
+
+       printf("\n\nStart %s.", __func__);
+       printf("\nThis test measures the average IA cycle cost using a "
+                       "constant request (packet) size. ");
+       printf("Cycle cost is only valid when indicators show device is "
+                       "not busy, i.e. Retries and EmptyPolls = 0.");
+
+       for (i = 0; i < RTE_DIM(params_set); i++) {
+               printf("\n");
+               params_set[i].total_operations = total_operations;
+
+               for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+                       params_set[i].buf_size = buf_lengths[j];
+                       test_perf_armv8_optimise_cyclecount(&params_set[i]);
+               }
+       }
+
+       return 0;
+}
+
+static int
+test_perf_aes_cbc_vary_burst_size(void)
+{
+       return test_perf_crypto_qp_vary_burst_size(testsuite_params.dev_id);
+}
+
+
+static struct rte_cryptodev_sym_session *
+test_perf_create_session(uint8_t dev_id, struct perf_test_params *pparams)
+{
+       static struct rte_cryptodev_sym_session *sess;
+       struct rte_crypto_sym_xform cipher_xform = { 0 };
+       struct rte_crypto_sym_xform auth_xform = { 0 };
+
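+       /* Local copies of the keys referenced by the xforms below. */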
+       uint8_t cipher_key[pparams->session_attrs->key_cipher_len];
+       uint8_t auth_key[pparams->session_attrs->key_auth_len];
+
+       memcpy(cipher_key, pparams->session_attrs->key_cipher_data,
+                pparams->session_attrs->key_cipher_len);
+       memcpy(auth_key, pparams->session_attrs->key_auth_data,
+                pparams->session_attrs->key_auth_len);
+
+       cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+       cipher_xform.next = NULL;
+
+       cipher_xform.cipher.algo = pparams->session_attrs->cipher_algorithm;
+       cipher_xform.cipher.op = pparams->session_attrs->cipher;
+       cipher_xform.cipher.key.data = cipher_key;
+       cipher_xform.cipher.key.length = pparams->session_attrs->key_cipher_len;
+
+       auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+       auth_xform.next = NULL;
+
+       auth_xform.auth.op = pparams->session_attrs->auth;
+       auth_xform.auth.algo = pparams->session_attrs->auth_algorithm;
+
+       auth_xform.auth.digest_length = pparams->session_attrs->digest_len;
+       auth_xform.auth.key.data = auth_key;
+       auth_xform.auth.key.length = pparams->session_attrs->key_auth_len;
+
+       /* Chain cipher-then-auth for encryption, auth-then-cipher otherwise. */
+       if (cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+               cipher_xform.next = &auth_xform;
+               sess = rte_cryptodev_sym_session_create(dev_id,
+                               &cipher_xform);
+       } else {
+               auth_xform.next = &cipher_xform;
+               sess = rte_cryptodev_sym_session_create(dev_id,
+                               &auth_xform);
+       }
+
+       return sess;
+}
+
+static inline struct rte_crypto_op *
+perf_gcm_set_crypto_op(struct rte_crypto_op *op, struct rte_mbuf *m,
+               struct rte_cryptodev_sym_session *sess,
+               struct crypto_params *m_hlp,
+               struct perf_test_params *params)
+{
+       if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+               rte_crypto_op_free(op);
+               return NULL;
+       }
+
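+       /*
+        * The IV is padded to a 16-byte boundary; the data and digest
+        * offsets below are all derived from this padded length.
+        */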
+       uint16_t iv_pad_len = ALIGN_POW2_ROUNDUP(params->symmetric_op->iv_len,
+                                                16);
+
+       op->sym->auth.digest.data = m_hlp->digest;
+       op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+                                         m,
+                                         params->symmetric_op->aad_len +
+                                         iv_pad_len +
+                                         params->symmetric_op->p_len);
+
+       op->sym->auth.digest.length = params->symmetric_op->t_len;
+
+       op->sym->auth.aad.data = m_hlp->aad;
+       op->sym->auth.aad.length = params->symmetric_op->aad_len;
+       op->sym->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(
+                                         m,
+                                         iv_pad_len);
+
+       rte_memcpy(op->sym->auth.aad.data, params->symmetric_op->aad_data,
+                      params->symmetric_op->aad_len);
+
+       op->sym->cipher.iv.data = m_hlp->iv;
+       rte_memcpy(op->sym->cipher.iv.data, params->symmetric_op->iv_data,
+                      params->symmetric_op->iv_len);
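+       /*
+        * With a 96-bit (12-byte) IV, GCM starts the counter block at 1,
+        * so set the last byte of the padded IV accordingly.
+        */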
+       if (params->symmetric_op->iv_len == 12)
+               op->sym->cipher.iv.data[15] = 1;
+
+       op->sym->cipher.iv.length = params->symmetric_op->iv_len;
+
+       op->sym->auth.data.offset =
+                       iv_pad_len + params->symmetric_op->aad_len;
+       op->sym->auth.data.length = params->symmetric_op->p_len;
+
+       op->sym->cipher.data.offset =
+                       iv_pad_len + params->symmetric_op->aad_len;
+       op->sym->cipher.data.length = params->symmetric_op->p_len;
+
+       op->sym->m_src = m;
+
+       return op;
+}
+
+static struct rte_mbuf *
+test_perf_create_pktmbuf_fill(struct rte_mempool *mpool,
+               struct perf_test_params *params,
+               unsigned buf_sz, struct crypto_params *m_hlp)
+{
+       struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
+       uint16_t iv_pad_len =
+                       ALIGN_POW2_ROUNDUP(params->symmetric_op->iv_len, 16);
+       uint16_t aad_len = params->symmetric_op->aad_len;
+       uint16_t digest_size = params->symmetric_op->t_len;
+       char *p;
+
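+       /*
+        * Lay the buffer out as AAD | padded IV | payload | digest and
+        * record a pointer to each region in m_hlp.
+        */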
+       p = rte_pktmbuf_append(m, aad_len);
+       if (p == NULL) {
+               rte_pktmbuf_free(m);
+               return NULL;
+       }
+       m_hlp->aad = (uint8_t *)p;
+
+       p = rte_pktmbuf_append(m, iv_pad_len);
+       if (p == NULL) {
+               rte_pktmbuf_free(m);
+               return NULL;
+       }
+       m_hlp->iv = (uint8_t *)p;
+
+       p = rte_pktmbuf_append(m, buf_sz);
+       if (p == NULL) {
+               rte_pktmbuf_free(m);
+               return NULL;
+       }
+       rte_memcpy(p, params->symmetric_op->p_data, buf_sz);
+
+       p = rte_pktmbuf_append(m, digest_size);
+       if (p == NULL) {
+               rte_pktmbuf_free(m);
+               return NULL;
+       }
+       m_hlp->digest = (uint8_t *)p;
+
+       return m;
+}
+
+static int
+perf_AES_GCM(uint8_t dev_id, uint16_t queue_id,
+            struct perf_test_params *pparams, uint32_t test_ops)
+{
+       int j = 0;
+       struct crypto_testsuite_params *ts_params = &testsuite_params;
+       struct rte_cryptodev_sym_session *sess;
+       struct rte_crypto_op *ops[pparams->burst_size];
+       struct rte_crypto_op *proc_ops[pparams->burst_size];
+       uint32_t total_operations = pparams->total_operations;
+
+       uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+       uint64_t processed = 0, failed_polls = 0, retries = 0;
+       uint64_t tsc_start = 0, tsc_end = 0;
+
+       uint16_t i = 0, l = 0, m = 0;
+       uint16_t burst = pparams->burst_size * NUM_MBUF_SETS;
+       uint16_t ops_unused = 0;
+
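+       /*
+        * Pre-build burst_size * NUM_MBUF_SETS mbufs; each iteration of the
+        * enqueue loop below attaches one set (selected by j % NUM_MBUF_SETS)
+        * to the crypto ops.
+        */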
+       struct rte_mbuf *mbufs[burst];
+       struct crypto_params m_hlp[burst];
+
+       if (rte_cryptodev_count() == 0) {
+               printf("\nNo crypto devices available. "
+                               "Is kernel driver loaded?\n");
+               return TEST_FAILED;
+       }
+
+       sess = test_perf_create_session(dev_id, pparams);
+       TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+       for (i = 0; i < burst; i++) {
+               mbufs[i] = test_perf_create_pktmbuf_fill(
+                               ts_params->mbuf_mp,
+                               pparams, pparams->symmetric_op->p_len,
+                               &m_hlp[i]);
+       }
+
+       if (test_ops)
+               total_operations = test_ops;
+
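+       /*
+        * Time the whole enqueue/dequeue run with the TSC; ops/s and Gbps
+        * are computed from this delta once the flush loop completes.
+        */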
+       tsc_start = rte_rdtsc_precise();
+       while (total_enqueued < total_operations) {
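+               /*
+                * Trim the final burst so that no more than total_operations
+                * are enqueued overall.
+                */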
+               uint16_t burst_size =
+                       total_enqueued + pparams->burst_size <= total_operations ?
+                       pparams->burst_size : total_operations - total_enqueued;
+               uint16_t ops_needed = burst_size - ops_unused;
+
+               if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+                               RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+                       printf("\nFailed to allocate enough ops, "
+                                       "continuing with dequeue only");
+               } else {
+                       for (i = 0; i < ops_needed; i++)
+                               ops[i] = perf_gcm_set_crypto_op(ops[i],
+                                       mbufs[i + (pparams->burst_size *
+                                               (j % NUM_MBUF_SETS))],
+                                       sess, &m_hlp[i + (pparams->burst_size *
+                                               (j % NUM_MBUF_SETS))], pparams);
+
+                       /* enqueue burst */
+                       burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+                                       queue_id, ops, burst_size);
+
+                       if (burst_enqueued < burst_size)
+                               retries++;
+
+                       ops_unused = burst_size - burst_enqueued;
+                       total_enqueued += burst_enqueued;
+               }
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (l = 0; l < burst_dequeued; l++)
+                               rte_crypto_op_free(proc_ops[l]);
+               }
+
+               j++;
+       }
+
+       /* Dequeue any operations still in the crypto device */
+       while (processed < total_operations) {
+               /* Sending 0 length burst to flush sw crypto device */
+               rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+               /* dequeue burst */
+               burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+                               proc_ops, pparams->burst_size);
+               if (burst_dequeued == 0)
+                       failed_polls++;
+               else {
+                       processed += burst_dequeued;
+
+                       for (m = 0; m < burst_dequeued; m++) {
+                               if (test_ops) {
+                                       uint16_t iv_pad_len = ALIGN_POW2_ROUNDUP(
+                                               pparams->symmetric_op->iv_len, 16);
+                                       uint8_t *pkt = rte_pktmbuf_mtod(
+                                               proc_ops[m]->sym->m_src,
+                                               uint8_t *);
+
+                                       TEST_ASSERT_BUFFERS_ARE_EQUAL(
+                                               pparams->symmetric_op->c_data,
+                                               pkt + iv_pad_len +
+                                               pparams->symmetric_op->aad_len,
+                                               pparams->symmetric_op->c_len,
+                                               "GCM Ciphertext data not as expected");
+
+                                       TEST_ASSERT_BUFFERS_ARE_EQUAL(
+                                               pparams->symmetric_op->t_data,
+                                               pkt + iv_pad_len +
+                                               pparams->symmetric_op->aad_len +
+                                               pparams->symmetric_op->c_len,
+                                               pparams->symmetric_op->t_len,
+                                               "GCM MAC data not as expected");
+                               }
+                               rte_crypto_op_free(proc_ops[m]);
+                       }
+               }
+       }
+
+       tsc_end = rte_rdtsc_precise();
+
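+       /* Throughput in Gbps: operations per second times payload bits. */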
+       double ops_s = ((double)processed / (tsc_end - tsc_start))
+                       * rte_get_tsc_hz();
+       double throughput = (ops_s * pparams->symmetric_op->p_len * 8)
+                       / 1000000000;
+
+       if (!test_ops) {
+               printf("\n%u\t\t%6.2f\t%16.2f\t%8"PRIu64"\t%10"PRIu64,
+                               pparams->symmetric_op->p_len,
+                               ops_s / 1000000, throughput, retries, failed_polls);
+       }
+
+       for (i = 0; i < burst; i++)
+               rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
+
+       return 0;
+}
+
+static int
+test_perf_AES_GCM(int continual_buf_len, int continual_size)
+{
+       uint16_t i, j, k, loops = 1;
+
+       uint16_t buf_lengths[] = { 64, 128, 256, 512, 1024, 1536, 2048 };
+
+       static const struct cryptodev_perf_test_data *gcm_tests[] = {
+                       &AES_GCM_128_12IV_0AAD
+       };
+
+       if (continual_buf_len)
+               loops = continual_size;
+
+       int TEST_CASES_GCM = RTE_DIM(gcm_tests);
+
+       const unsigned burst_size = 32;
+
+       struct symmetric_op ops_set[TEST_CASES_GCM];
+       struct perf_test_params params_set[TEST_CASES_GCM];
+       struct symmetric_session_attrs session_attrs[TEST_CASES_GCM];
+       static const struct cryptodev_perf_test_data *gcm_test;
+
+       for (i = 0; i < TEST_CASES_GCM; ++i) {
+
+               gcm_test = gcm_tests[i];
+
+               session_attrs[i].cipher =
+                               RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+               session_attrs[i].cipher_algorithm =
+                               RTE_CRYPTO_CIPHER_AES_GCM;
+               session_attrs[i].key_cipher_data =
+                               gcm_test->key.data;
+               session_attrs[i].key_cipher_len =
+                               gcm_test->key.len;
+               session_attrs[i].auth_algorithm =
+                               RTE_CRYPTO_AUTH_AES_GCM;
+               session_attrs[i].auth =
+                       RTE_CRYPTO_AUTH_OP_GENERATE;
+               session_attrs[i].key_auth_data = NULL;
+               session_attrs[i].key_auth_len = 0;
+               session_attrs[i].digest_len =
+                               gcm_test->auth_tag.len;
+
+               ops_set[i].aad_data = gcm_test->aad.data;
+               ops_set[i].aad_len = gcm_test->aad.len;
+               ops_set[i].iv_data = gcm_test->iv.data;
+               ops_set[i].iv_len = gcm_test->iv.len;
+               ops_set[i].p_data = gcm_test->plaintext.data;
+               ops_set[i].p_len = buf_lengths[i];
+               ops_set[i].c_data = gcm_test->ciphertext.data;
+               ops_set[i].c_len = buf_lengths[i];
+               ops_set[i].t_data = gcm_test->auth_tags[i].data;
+               ops_set[i].t_len = gcm_test->auth_tags[i].len;
+
+               params_set[i].chain = CIPHER_HASH;
+               params_set[i].session_attrs = &session_attrs[i];
+               params_set[i].symmetric_op = &ops_set[i];
+               if (continual_buf_len)
+                       params_set[i].total_operations = 0xFFFFFF;
+               else
+                       params_set[i].total_operations = 1000000;
+
+               params_set[i].burst_size = burst_size;
+
+       }
+
+       if (continual_buf_len)
+               printf("\nCipher algo: %s Cipher hash: %s cipher key size: %ub"
+                       " burst size: %u", "AES_GCM", "AES_GCM",
+                       gcm_test->key.len << 3, burst_size);
+
+       for (i = 0; i < RTE_DIM(gcm_tests); i++) {
+
+               if (!continual_buf_len) {
+                       printf("\nCipher algo: %s Cipher hash: %s cipher key size: %ub"
+                               " burst size: %u", "AES_GCM", "AES_GCM",
+                               gcm_tests[i]->key.len << 3, burst_size);
+                       printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
+                               " Retries\tEmptyPolls");
+               }
+
+               uint16_t len = RTE_DIM(buf_lengths);
+               uint16_t p = 0;
+
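+               /*
+                * When a specific buffer length was requested, restrict the
+                * sweep below to that single entry of buf_lengths[].
+                */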
+               if (continual_buf_len) {
+                       for (k = 0; k < RTE_DIM(buf_lengths); k++)
+                               if (buf_lengths[k] == continual_buf_len) {
+                                       len = k + 1;
+                                       p = k;
+                                       break;
+                               }
+               }
+               for (j = p; j < len; ++j) {
+
+                       params_set[i].symmetric_op->c_len = buf_lengths[j];
+                       params_set[i].symmetric_op->p_len = buf_lengths[j];
+
+                       ops_set[i].t_data = gcm_tests[i]->auth_tags[j].data;
+                       ops_set[i].t_len = gcm_tests[i]->auth_tags[j].len;
+
+                       /* Run twice: once to verify the ciphertext and tag,
+                        * once for the performance measurement.
+                        */
+                       if (perf_AES_GCM(testsuite_params.dev_id, 0,
+                                       &params_set[i], 1))
+                               return TEST_FAILED;
+
+                       for (k = 0; k < loops; k++) {
+                               if (continual_buf_len)
+                                       printf("\n\nBuffer Size(B)\tOPS(M)\t"
+                                               "Throughput(Gbps)\t"
+                                               "Retries\tEmptyPolls");
+                               if (perf_AES_GCM(testsuite_params.dev_id, 0,
+                                               &params_set[i], 0))
+                                       return TEST_FAILED;
+                               if (continual_buf_len)
+                                       printf("\n\nCompleted loop %i of %i ...",
+                                               k+1, loops);
+                       }
+               }
+
+       }
+       printf("\n");
+       return 0;
+}
+
+static int test_cryptodev_perf_AES_GCM(void)
+{
+       return test_perf_AES_GCM(0, 0);
+}
+/*
+ * This function runs the AES GCM performance test with the packet
+ * size given as an argument. If that size is not present in the
+ * buf_lengths array, all sizes are used instead.
+ */
+static int test_continual_perf_AES_GCM(void)
+{
+       return test_perf_AES_GCM(1024, 10);
+}
+
+static int
+test_perf_continual_performance_test(void)
+{
+       unsigned int total_operations = 0xFFFFFF;
+       unsigned int total_loops = 10;
+       unsigned int burst_size = 32;
+       uint8_t i;
+
+       struct perf_test_params params_set = {
+               .total_operations = total_operations,
+               .burst_size = burst_size,
+               .buf_size = 1024,
+
+               .chain = CIPHER_HASH,
+
+               .cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+               .cipher_key_length = 16,
+               .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+       };
+
+       for (i = 1; i <= total_loops; ++i) {
+               printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+                               " burst_size: %d ops\n",
+                               chain_mode_name(params_set.chain),
+                               cipher_algo_name(params_set.cipher_algo),
+                               auth_algo_name(params_set.auth_algo),
+                               params_set.cipher_key_length,
+                               burst_size);
+               printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
+                               "Retries\tEmptyPolls\n");
+               test_perf_aes_sha(testsuite_params.dev_id, 0,
+                               &params_set);
+               printf("\nCompleted loop %i of %i ...", i, total_loops);
+       }
+       return 0;
 }
 
-static struct test_command cryptodev_aesni_mb_perf_cmd = {
-       .command = "cryptodev_aesni_mb_perftest",
-       .callback = perftest_aesni_mb_cryptodev,
+static struct unit_test_suite cryptodev_qat_continual_testsuite  = {
+       .suite_name = "Crypto Device Continual Performance Test",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_continual_performance_test),
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_continual_perf_AES_GCM),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
+};
+
+static struct unit_test_suite cryptodev_testsuite  = {
+       .suite_name = "Crypto Device Unit Test Suite",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_cryptodev_perf_AES_GCM),
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_aes_cbc_vary_burst_size),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
+};
+
+static struct unit_test_suite cryptodev_gcm_testsuite  = {
+       .suite_name = "Crypto Device AESNI GCM Unit Test Suite",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_cryptodev_perf_AES_GCM),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
+};
+
+static struct unit_test_suite cryptodev_aes_testsuite  = {
+       .suite_name = "Crypto Device AESNI MB Unit Test Suite",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
+};
+
+static struct unit_test_suite cryptodev_snow3g_testsuite  = {
+       .suite_name = "Crypto Device SNOW3G Unit Test Suite",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_snow3G_vary_pkt_size),
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_snow3G_vary_burst_size),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
+};
+
+static struct unit_test_suite cryptodev_openssl_testsuite  = {
+       .suite_name = "Crypto Device OPENSSL Unit Test Suite",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_openssl_vary_pkt_size),
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_openssl_vary_burst_size),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
 };
 
-static struct test_command cryptodev_qat_perf_cmd = {
-       .command = "cryptodev_qat_perftest",
-       .callback = perftest_qat_cryptodev,
+static struct unit_test_suite cryptodev_armv8_testsuite  = {
+       .suite_name = "Crypto Device ARMv8 Unit Test Suite",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_armv8_vary_pkt_size),
+               TEST_CASE_ST(ut_setup, ut_teardown,
+                               test_perf_armv8_vary_burst_size),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
 };
 
-REGISTER_TEST_COMMAND(cryptodev_aesni_mb_perf_cmd);
-REGISTER_TEST_COMMAND(cryptodev_qat_perf_cmd);
+static int
+perftest_aesni_gcm_cryptodev(void)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_AESNI_GCM_PMD;
+
+       return unit_test_suite_runner(&cryptodev_gcm_testsuite);
+}
+
+static int
+perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
+
+       return unit_test_suite_runner(&cryptodev_aes_testsuite);
+}
+
+static int
+perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+
+       return unit_test_suite_runner(&cryptodev_testsuite);
+}
+
+static int
+perftest_sw_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_SNOW3G_PMD;
+
+       return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
+}
+
+static int
+perftest_qat_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+
+       return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
+}
+
+static int
+perftest_openssl_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_OPENSSL_PMD;
+
+       return unit_test_suite_runner(&cryptodev_openssl_testsuite);
+}
+
+static int
+perftest_qat_continual_cryptodev(void)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+
+       return unit_test_suite_runner(&cryptodev_qat_continual_testsuite);
+}
+
+static int
+perftest_sw_armv8_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+       gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_ARMV8_PMD;
+
+       return unit_test_suite_runner(&cryptodev_armv8_testsuite);
+}
+
+REGISTER_TEST_COMMAND(cryptodev_aesni_mb_perftest, perftest_aesni_mb_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_qat_perftest, perftest_qat_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_perftest, perftest_sw_snow3g_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_qat_snow3g_perftest, perftest_qat_snow3g_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_perftest, perftest_aesni_gcm_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_openssl_perftest,
+               perftest_openssl_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_qat_continual_perftest,
+               perftest_qat_continual_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_sw_armv8_perftest,
+               perftest_sw_armv8_cryptodev);