return 0;
}
+/*
+ * Build a symmetric auth (digest generate/verify) operation in env.op from
+ * the current test vector and attach env.sess to it.
+ *
+ * For FIPS_TEST_ENC_AUTH_GEN the plaintext is copied into env.mbuf and room
+ * is reserved after it for the digest the device will generate; otherwise
+ * the ciphertext is copied in and the reference digest from the vector is
+ * used for verification.
+ *
+ * Returns 0 on success, -EPERM when the input length exceeds the accepted
+ * bound, -ENOMEM when env.mbuf cannot hold the data plus digest.
+ */
+static int
+prepare_auth_op(void)
+{
+	struct rte_crypto_sym_op *sym = env.op->sym;
+
+	__rte_crypto_op_reset(env.op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	rte_pktmbuf_reset(env.mbuf);
+
+	sym->m_src = env.mbuf;
+	sym->auth.data.offset = 0;
+
+	if (info.op == FIPS_TEST_ENC_AUTH_GEN) {
+		uint8_t *pt;
+
+		/* NOTE(review): this bounds a byte count with
+		 * RTE_MBUF_MAX_NB_SEGS, which is a segment count —
+		 * confirm this is the intended limit.
+		 */
+		if (vec.pt.len > RTE_MBUF_MAX_NB_SEGS) {
+			RTE_LOG(ERR, USER1, "PT len %u\n", vec.pt.len);
+			return -EPERM;
+		}
+
+		pt = (uint8_t *)rte_pktmbuf_append(env.mbuf, vec.pt.len +
+				vec.cipher_auth.digest.len);
+		if (!pt) {
+			RTE_LOG(ERR, USER1, "Error %i: MBUF too small\n",
+					-ENOMEM);
+			return -ENOMEM;
+		}
+
+		memcpy(pt, vec.pt.val, vec.pt.len);
+		sym->auth.data.length = vec.pt.len;
+		/* Device writes the generated digest right after the data. */
+		sym->auth.digest.data = pt + vec.pt.len;
+		sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+				env.mbuf, vec.pt.len);
+	} else {
+		uint8_t *ct;
+
+		if (vec.ct.len > RTE_MBUF_MAX_NB_SEGS) {
+			RTE_LOG(ERR, USER1, "CT len %u\n", vec.ct.len);
+			return -EPERM;
+		}
+
+		ct = (uint8_t *)rte_pktmbuf_append(env.mbuf,
+				vec.ct.len + vec.cipher_auth.digest.len);
+		if (!ct) {
+			RTE_LOG(ERR, USER1, "Error %i: MBUF too small\n",
+					-ENOMEM);
+			return -ENOMEM;
+		}
+
+		memcpy(ct, vec.ct.val, vec.ct.len);
+		sym->auth.data.length = vec.ct.len;
+		/* Verify against the reference digest from the vector. */
+		sym->auth.digest.data = vec.cipher_auth.digest.val;
+		sym->auth.digest.phys_addr = rte_malloc_virt2iova(
+				sym->auth.digest.data);
+	}
+
+	rte_crypto_op_attach_sym_session(env.op, env.sess);
+
+	/* Fix: function returns int but previously fell off the end. */
+	return 0;
+}
+
+/*
+ * Build a symmetric AEAD operation in env.op from the current test vector
+ * and attach env.sess to it.
+ *
+ * The IV from the vector is copied into the op's IV area at IV_OFF; for
+ * AES-CCM it is written at offset 1 (per CCM, the first IV byte is
+ * reserved for the flags/length octet). For FIPS_TEST_ENC_AUTH_GEN the
+ * plaintext plus digest room goes into env.mbuf; otherwise the ciphertext
+ * is copied in and the vector's reference digest is used for verification.
+ *
+ * Returns 0 on success, -EPERM when the input length exceeds the accepted
+ * bound, -ENOMEM when env.mbuf is too small.
+ */
+static int
+prepare_aead_op(void)
+{
+	struct rte_crypto_sym_op *sym = env.op->sym;
+	uint8_t *iv = rte_crypto_op_ctod_offset(env.op, uint8_t *, IV_OFF);
+
+	__rte_crypto_op_reset(env.op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	rte_pktmbuf_reset(env.mbuf);
+
+	if (info.algo == FIPS_TEST_ALGO_AES_CCM)
+		memcpy(iv + 1, vec.iv.val, vec.iv.len);
+	else
+		memcpy(iv, vec.iv.val, vec.iv.len);
+
+	sym->m_src = env.mbuf;
+	sym->aead.data.offset = 0;
+	sym->aead.aad.data = vec.aead.aad.val;
+	sym->aead.aad.phys_addr = rte_malloc_virt2iova(sym->aead.aad.data);
+
+	if (info.op == FIPS_TEST_ENC_AUTH_GEN) {
+		uint8_t *pt;
+
+		/* NOTE(review): byte count compared against
+		 * RTE_MBUF_MAX_NB_SEGS (a segment count) — confirm bound.
+		 */
+		if (vec.pt.len > RTE_MBUF_MAX_NB_SEGS) {
+			RTE_LOG(ERR, USER1, "PT len %u\n", vec.pt.len);
+			return -EPERM;
+		}
+
+		pt = (uint8_t *)rte_pktmbuf_append(env.mbuf,
+				vec.pt.len + vec.aead.digest.len);
+		if (!pt) {
+			RTE_LOG(ERR, USER1, "Error %i: MBUF too small\n",
+					-ENOMEM);
+			return -ENOMEM;
+		}
+
+		memcpy(pt, vec.pt.val, vec.pt.len);
+		sym->aead.data.length = vec.pt.len;
+		/* Device writes the generated tag right after the data. */
+		sym->aead.digest.data = pt + vec.pt.len;
+		sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+				env.mbuf, vec.pt.len);
+	} else {
+		uint8_t *ct;
+
+		if (vec.ct.len > RTE_MBUF_MAX_NB_SEGS) {
+			RTE_LOG(ERR, USER1, "CT len %u\n", vec.ct.len);
+			return -EPERM;
+		}
+
+		ct = (uint8_t *)rte_pktmbuf_append(env.mbuf, vec.ct.len);
+		if (!ct) {
+			RTE_LOG(ERR, USER1, "Error %i: MBUF too small\n",
+					-ENOMEM);
+			return -ENOMEM;
+		}
+
+		memcpy(ct, vec.ct.val, vec.ct.len);
+		sym->aead.data.length = vec.ct.len;
+		/* Verify against the reference tag from the vector. */
+		sym->aead.digest.data = vec.aead.digest.val;
+		sym->aead.digest.phys_addr = rte_malloc_virt2iova(
+				sym->aead.digest.data);
+	}
+
+	rte_crypto_op_attach_sym_session(env.op, env.sess);
+
+	/* Fix: function returns int but previously fell off the end. */
+	return 0;
+}
+
static int
prepare_aes_xform(struct rte_crypto_sym_xform *xform)
{
return 0;
}
+/*
+ * Fill in a 3DES-CBC cipher transform from the current test vector and
+ * check that the selected device supports the vector's key and IV lengths.
+ *
+ * Returns 0 on success, -EINVAL if the device exposes no matching
+ * capability, -EPERM if the key/IV lengths are unsupported.
+ */
+static int
+prepare_tdes_xform(struct rte_crypto_sym_xform *xform)
+{
+	struct rte_crypto_cipher_xform *cipher = &xform->cipher;
+	struct rte_cryptodev_sym_capability_idx idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	cipher->algo = RTE_CRYPTO_CIPHER_3DES_CBC;
+	if (info.op == FIPS_TEST_ENC_AUTH_GEN)
+		cipher->op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	else
+		cipher->op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+	cipher->key.data = vec.cipher_auth.key.val;
+	cipher->key.length = vec.cipher_auth.key.len;
+	cipher->iv.offset = IV_OFF;
+	cipher->iv.length = vec.iv.len;
+
+	idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	idx.algo.cipher = RTE_CRYPTO_CIPHER_3DES_CBC;
+
+	capability = rte_cryptodev_sym_capability_get(env.dev_id, &idx);
+	if (capability == NULL) {
+		RTE_LOG(ERR, USER1, "Failed to get capability for cdev %u\n",
+				env.dev_id);
+		return -EINVAL;
+	}
+
+	if (rte_cryptodev_sym_capability_check_cipher(capability,
+			cipher->key.length, cipher->iv.length) != 0) {
+		RTE_LOG(ERR, USER1, "PMD %s key length %u IV length %u\n",
+				info.device_name, cipher->key.length,
+				cipher->iv.length);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * Fill in an HMAC auth transform (algorithm taken from the parsed interim
+ * info) from the current test vector and check that the selected device
+ * supports the vector's key and digest lengths.
+ *
+ * Returns 0 on success, -EINVAL if the device exposes no matching
+ * capability, -EPERM if the key/digest lengths are unsupported.
+ */
+static int
+prepare_hmac_xform(struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_cryptodev_symmetric_capability *cap;
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	struct rte_crypto_auth_xform *auth_xform = &xform->auth;
+
+	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	auth_xform->algo = info.interim_info.hmac_data.algo;
+	auth_xform->op = RTE_CRYPTO_AUTH_OP_GENERATE;
+	auth_xform->digest_length = vec.cipher_auth.digest.len;
+	auth_xform->key.data = vec.cipher_auth.key.val;
+	auth_xform->key.length = vec.cipher_auth.key.len;
+
+	cap_idx.algo.auth = auth_xform->algo;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	cap = rte_cryptodev_sym_capability_get(env.dev_id, &cap_idx);
+	if (!cap) {
+		RTE_LOG(ERR, USER1, "Failed to get capability for cdev %u\n",
+				env.dev_id);
+		return -EINVAL;
+	}
+
+	if (rte_cryptodev_sym_capability_check_auth(cap,
+			auth_xform->key.length,
+			auth_xform->digest_length, 0) != 0) {
+		/* Fix: message said "IV length" but the value printed is
+		 * the digest length.
+		 */
+		RTE_LOG(ERR, USER1, "PMD %s key length %u digest length %u\n",
+				info.device_name, auth_xform->key.length,
+				auth_xform->digest_length);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * Fill in an AES-GCM AEAD transform from the current test vector and check
+ * that the selected device supports the vector's key, tag, AAD and IV
+ * lengths.
+ *
+ * Returns 0 on success, -EINVAL if the device exposes no matching
+ * capability, -EPERM if the lengths are unsupported.
+ */
+static int
+prepare_gcm_xform(struct rte_crypto_sym_xform *xform)
+{
+	struct rte_crypto_aead_xform *aead = &xform->aead;
+	struct rte_cryptodev_sym_capability_idx idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	aead->algo = RTE_CRYPTO_AEAD_AES_GCM;
+	if (info.op == FIPS_TEST_ENC_AUTH_GEN)
+		aead->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+	else
+		aead->op = RTE_CRYPTO_AEAD_OP_DECRYPT;
+	aead->aad_length = vec.aead.aad.len;
+	aead->digest_length = vec.aead.digest.len;
+	aead->iv.offset = IV_OFF;
+	aead->iv.length = vec.iv.len;
+	aead->key.data = vec.aead.key.val;
+	aead->key.length = vec.aead.key.len;
+
+	idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+	idx.algo.aead = aead->algo;
+
+	capability = rte_cryptodev_sym_capability_get(env.dev_id, &idx);
+	if (capability == NULL) {
+		RTE_LOG(ERR, USER1, "Failed to get capability for cdev %u\n",
+				env.dev_id);
+		return -EINVAL;
+	}
+
+	if (rte_cryptodev_sym_capability_check_aead(capability,
+			aead->key.length, aead->digest_length,
+			aead->aad_length, aead->iv.length) != 0) {
+		RTE_LOG(ERR, USER1,
+				"PMD %s key_len %u tag_len %u aad_len %u iv_len %u\n",
+				info.device_name, aead->key.length,
+				aead->digest_length,
+				aead->aad_length,
+				aead->iv.length);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * Fill in an AES-CMAC auth transform from the current test vector and
+ * check that the selected device supports the vector's key and digest
+ * lengths.
+ *
+ * Returns 0 on success, -EINVAL if the device exposes no matching
+ * capability, -EPERM if the key/digest lengths are unsupported.
+ */
+static int
+prepare_cmac_xform(struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_cryptodev_symmetric_capability *cap;
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	struct rte_crypto_auth_xform *auth_xform = &xform->auth;
+
+	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	auth_xform->algo = RTE_CRYPTO_AUTH_AES_CMAC;
+	auth_xform->op = (info.op == FIPS_TEST_ENC_AUTH_GEN) ?
+			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+	auth_xform->digest_length = vec.cipher_auth.digest.len;
+	auth_xform->key.data = vec.cipher_auth.key.val;
+	auth_xform->key.length = vec.cipher_auth.key.len;
+
+	cap_idx.algo.auth = auth_xform->algo;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	cap = rte_cryptodev_sym_capability_get(env.dev_id, &cap_idx);
+	if (!cap) {
+		RTE_LOG(ERR, USER1, "Failed to get capability for cdev %u\n",
+				env.dev_id);
+		return -EINVAL;
+	}
+
+	if (rte_cryptodev_sym_capability_check_auth(cap,
+			auth_xform->key.length,
+			auth_xform->digest_length, 0) != 0) {
+		/* Fix: message said "IV length" but the value printed is
+		 * the digest length.
+		 */
+		RTE_LOG(ERR, USER1, "PMD %s key length %u digest length %u\n",
+				info.device_name, auth_xform->key.length,
+				auth_xform->digest_length);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * Fill in an AES-CCM AEAD transform from the current test vector and check
+ * that the selected device supports the vector's key, tag, AAD and IV
+ * lengths.
+ *
+ * Returns 0 on success, -EINVAL if the device exposes no matching
+ * capability, -EPERM if the lengths are unsupported.
+ */
+static int
+prepare_ccm_xform(struct rte_crypto_sym_xform *xform)
+{
+	struct rte_crypto_aead_xform *aead = &xform->aead;
+	struct rte_cryptodev_sym_capability_idx idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	aead->algo = RTE_CRYPTO_AEAD_AES_CCM;
+	if (info.op == FIPS_TEST_ENC_AUTH_GEN)
+		aead->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+	else
+		aead->op = RTE_CRYPTO_AEAD_OP_DECRYPT;
+	aead->aad_length = vec.aead.aad.len;
+	aead->digest_length = vec.aead.digest.len;
+	aead->iv.offset = IV_OFF;
+	aead->iv.length = vec.iv.len;
+	aead->key.data = vec.aead.key.val;
+	aead->key.length = vec.aead.key.len;
+
+	idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+	idx.algo.aead = aead->algo;
+
+	capability = rte_cryptodev_sym_capability_get(env.dev_id, &idx);
+	if (capability == NULL) {
+		RTE_LOG(ERR, USER1, "Failed to get capability for cdev %u\n",
+				env.dev_id);
+		return -EINVAL;
+	}
+
+	if (rte_cryptodev_sym_capability_check_aead(capability,
+			aead->key.length, aead->digest_length,
+			aead->aad_length, aead->iv.length) != 0) {
+		RTE_LOG(ERR, USER1,
+				"PMD %s key_len %u tag_len %u aad_len %u iv_len %u\n",
+				info.device_name, aead->key.length,
+				aead->digest_length,
+				aead->aad_length,
+				aead->iv.length);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
static void
get_writeback_data(struct fips_val *val)
{
return 0;
}
+/*
+ * TDES Monte Carlo Test driver: TDES_EXTERN_ITER outer rounds, each running
+ * TDES_INTERN_ITER chained crypto operations where the IV/PT (or IV/CT)
+ * for each inner step is derived from previous outputs; after every outer
+ * round a new key is derived from the last three outputs. Presumably this
+ * follows the NIST CAVP TDES MCT procedure — confirm against the spec.
+ *
+ * Returns 0 on success (or bypass), a negative errno from fips_run_test()
+ * on failure.
+ */
+static int
+fips_mct_tdes_test(void)
+{
+#define TDES_BLOCK_SIZE 8
+#define TDES_EXTERN_ITER 400
+#define TDES_INTERN_ITER 10000
+ struct fips_val val, val_key;
+ uint8_t prev_out[TDES_BLOCK_SIZE];
+ uint8_t prev_prev_out[TDES_BLOCK_SIZE];
+ uint8_t prev_in[TDES_BLOCK_SIZE];
+ uint32_t i, j, k;
+ int ret;
+
+ for (i = 0; i < TDES_EXTERN_ITER; i++) {
+ /* Refresh the written-out vector for every round after the first. */
+ if (i != 0)
+ update_info_vec(i);
+
+ fips_test_write_one_case();
+
+ for (j = 0; j < TDES_INTERN_ITER; j++) {
+ ret = fips_run_test();
+ if (ret < 0) {
+ /* -EPERM means the device cannot run this case: report
+ * "Bypass" instead of failing the whole file.
+ */
+ if (ret == -EPERM) {
+ fprintf(info.fp_wr, "Bypass\n");
+ return 0;
+ }
+
+ return ret;
+ }
+
+ get_writeback_data(&val);
+
+ if (info.op == FIPS_TEST_DEC_AUTH_VERIF)
+ memcpy(prev_in, vec.ct.val, TDES_BLOCK_SIZE);
+
+ /* First inner iteration seeds the chaining values differently. */
+ if (j == 0) {
+ memcpy(prev_out, val.val, TDES_BLOCK_SIZE);
+
+ if (info.op == FIPS_TEST_ENC_AUTH_GEN) {
+ memcpy(vec.pt.val, vec.iv.val,
+ TDES_BLOCK_SIZE);
+ memcpy(vec.iv.val, val.val,
+ TDES_BLOCK_SIZE);
+ } else {
+ memcpy(vec.iv.val, vec.ct.val,
+ TDES_BLOCK_SIZE);
+ memcpy(vec.ct.val, val.val,
+ TDES_BLOCK_SIZE);
+ }
+ continue;
+ }
+
+ /* Chain: next IV comes from the latest output (encrypt) or the
+ * previous ciphertext (decrypt); next input from prev_out/val.
+ */
+ if (info.op == FIPS_TEST_ENC_AUTH_GEN) {
+ memcpy(vec.iv.val, val.val, TDES_BLOCK_SIZE);
+ memcpy(vec.pt.val, prev_out, TDES_BLOCK_SIZE);
+ } else {
+ memcpy(vec.iv.val, vec.ct.val, TDES_BLOCK_SIZE);
+ memcpy(vec.ct.val, val.val, TDES_BLOCK_SIZE);
+ }
+
+ if (j == TDES_INTERN_ITER - 1)
+ continue;
+
+ memcpy(prev_out, val.val, TDES_BLOCK_SIZE);
+
+ /* Keep the third-to-last output: needed for 3-key derivation. */
+ if (j == TDES_INTERN_ITER - 3)
+ memcpy(prev_prev_out, val.val, TDES_BLOCK_SIZE);
+ }
+
+ info.parse_writeback(&val);
+ fprintf(info.fp_wr, "\n");
+
+ if (i == TDES_EXTERN_ITER - 1)
+ continue;
+
+ /** update key */
+ /* Struct copy: val_key.val aliases the live key buffer, so the
+ * XORs below update the key for the next round in place.
+ */
+ memcpy(&val_key, &vec.cipher_auth.key, sizeof(val_key));
+
+ /* Detect 1-, 2- or 3-key TDES by comparing the three 8-byte
+ * key parts (done once, on the first round that needs it).
+ */
+ if (info.interim_info.tdes_data.nb_keys == 0) {
+ if (memcmp(val_key.val, val_key.val + 8, 8) == 0)
+ info.interim_info.tdes_data.nb_keys = 1;
+ else if (memcmp(val_key.val, val_key.val + 16, 8) == 0)
+ info.interim_info.tdes_data.nb_keys = 2;
+ else
+ info.interim_info.tdes_data.nb_keys = 3;
+
+ }
+
+ /* XOR each key part with the last outputs, depending on how many
+ * distinct DES keys are in use.
+ */
+ for (k = 0; k < TDES_BLOCK_SIZE; k++) {
+
+ switch (info.interim_info.tdes_data.nb_keys) {
+ case 3:
+ val_key.val[k] ^= val.val[k];
+ val_key.val[k + 8] ^= prev_out[k];
+ val_key.val[k + 16] ^= prev_prev_out[k];
+ break;
+ case 2:
+ val_key.val[k] ^= val.val[k];
+ val_key.val[k + 8] ^= prev_out[k];
+ val_key.val[k + 16] ^= val.val[k];
+ break;
+ default: /* case 1 */
+ val_key.val[k] ^= val.val[k];
+ val_key.val[k + 8] ^= val.val[k];
+ val_key.val[k + 16] ^= val.val[k];
+ break;
+ }
+
+ }
+
+ /* Force odd parity on all 24 key bytes: keep the byte when its
+ * popcount is odd, otherwise flip the least-significant bit.
+ */
+ for (k = 0; k < 24; k++)
+ val_key.val[k] = (__builtin_popcount(val_key.val[k]) &
+ 0x1) ?
+ val_key.val[k] : (val_key.val[k] ^ 0x1);
+
+ /* Seed IV and input for the next outer round. */
+ if (info.op == FIPS_TEST_ENC_AUTH_GEN) {
+ memcpy(vec.iv.val, val.val, TDES_BLOCK_SIZE);
+ memcpy(vec.pt.val, prev_out, TDES_BLOCK_SIZE);
+ } else {
+ memcpy(vec.iv.val, prev_out, TDES_BLOCK_SIZE);
+ memcpy(vec.ct.val, val.val, TDES_BLOCK_SIZE);
+ }
+ }
+
+ return 0;
+}
+
static int
fips_mct_aes_test(void)
{
else
test_ops.test = fips_generic_test;
break;
-
+ case FIPS_TEST_ALGO_HMAC:
+ test_ops.prepare_op = prepare_auth_op;
+ test_ops.prepare_xform = prepare_hmac_xform;
+ test_ops.test = fips_generic_test;
+ break;
+ case FIPS_TEST_ALGO_TDES:
+ test_ops.prepare_op = prepare_cipher_op;
+ test_ops.prepare_xform = prepare_tdes_xform;
+ if (info.interim_info.tdes_data.test_type == TDES_MCT)
+ test_ops.test = fips_mct_tdes_test;
+ else
+ test_ops.test = fips_generic_test;
+ break;
+ case FIPS_TEST_ALGO_AES_GCM:
+ test_ops.prepare_op = prepare_aead_op;
+ test_ops.prepare_xform = prepare_gcm_xform;
+ test_ops.test = fips_generic_test;
+ break;
+ case FIPS_TEST_ALGO_AES_CMAC:
+ test_ops.prepare_op = prepare_auth_op;
+ test_ops.prepare_xform = prepare_cmac_xform;
+ test_ops.test = fips_generic_test;
+ break;
+ case FIPS_TEST_ALGO_AES_CCM:
+ test_ops.prepare_op = prepare_aead_op;
+ test_ops.prepare_xform = prepare_ccm_xform;
+ test_ops.test = fips_generic_test;
+ break;
default:
return -1;
}