X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fccp%2Fccp_crypto.c;h=f37d35f18fa181a7621d6168e5f748c4eacbc60a;hb=e863fe3a13da89787fdf3b5c590101a3c0f10af6;hp=a0809e41e9182050c26090a55ff0829f82b0ce8b;hpb=6c561b03b54c3d09e333ff14caa06a40c894dfd2;p=dpdk.git

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index a0809e41e9..f37d35f18f 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -27,14 +27,14 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 #include
 #include
 #include
-#endif
+extern int iommu_mode;
+void *sha_ctx;
 
 /* SHA initial context values */
-static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
 	SHA1_H2, SHA1_H1,
 	SHA1_H0, 0x0U,
@@ -746,8 +746,13 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
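[Editor's note] The iommu_mode test above is the core pattern this patch repeats: DMA addresses come from rte_mem_virt2iova() when the CCP sits behind an IOMMU programmed with virtual addresses (iommu_mode == 2 in this driver), and from rte_mem_virt2phy() otherwise. A minimal sketch of that pattern as a helper; the name ccp_virt2hw is hypothetical and not part of the patch, while the two rte_mem_* calls are real DPDK APIs:

/* Sketch only: assumes iommu_mode == 2 marks VA-mode IOVA, as in this driver. */
#include <rte_memory.h>

extern int iommu_mode;

static inline phys_addr_t
ccp_virt2hw(void *addr)
{
	if (iommu_mode == 2)
		return (phys_addr_t)rte_mem_virt2iova(addr);
	return (phys_addr_t)rte_mem_virt2phy(addr);
}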
@@ -766,17 +771,19 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
-		sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
-		sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		sess->auth.block_size = MD5_BLOCK_SIZE;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
+		if (sess->auth_opt) {
+			sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+			sess->auth.offset = ((CCP_SB_BYTES << 1) -
+					     MD5_DIGEST_SIZE);
+			sess->auth.key_length = auth_xform->key.length;
+			sess->auth.block_size = MD5_BLOCK_SIZE;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+		} else
+			return -1; /* HMAC MD5 not supported on CCP */
 		break;
-#endif
 	case RTE_CRYPTO_AUTH_SHA1:
 		sess->auth.engine = CCP_ENGINE_SHA;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
@@ -784,35 +791,37 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha1_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
-		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
-		sess->auth.block_size = SHA1_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-#else
-		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
-			return -1;
-		sess->auth.engine = CCP_ENGINE_SHA;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
-		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
-		sess->auth.ctx_len = CCP_SB_BYTES;
-		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
-		sess->auth.block_size = SHA1_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-		if (generate_partial_hash(sess))
-			return -1;
-#endif
+		if (sess->auth_opt) {
+			if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+			sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+			sess->auth.block_size = SHA1_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+		} else {
+			if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+				return -1;
+			sess->auth.engine = CCP_ENGINE_SHA;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+			sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+			sess->auth.ctx_len = CCP_SB_BYTES;
+			sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+			sess->auth.block_size = SHA1_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			memset(sess->auth.pre_compute, 0,
+			       sess->auth.ctx_len << 1);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+			if (generate_partial_hash(sess))
+				return -1;
+		}
 		break;
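[Editor's note] In the hardware branch above, generate_partial_hash() fills sess->auth.pre_compute with the two intermediate hash states HMAC needs (the PHash1/PHash2 that later hunks load into the LSB). A sketch of that standard RFC 2104 precompute for SHA-1, using OpenSSL's low-level SHA API; the helper name sha1_hmac_precompute is hypothetical and the driver's own routine differs in detail:

/* Sketch only: hash one block of (key ^ ipad) and (key ^ opad) and keep
 * the two intermediate SHA-1 states so the engine can resume from them. */
#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>

static void
sha1_hmac_precompute(const uint8_t *key, size_t klen,
		     SHA_CTX *ipad_ctx, SHA_CTX *opad_ctx)
{
	uint8_t block[SHA_CBLOCK] = {0};	/* SHA_CBLOCK == 64 */
	size_t i;

	memcpy(block, key, klen);	/* caller checks klen <= SHA_CBLOCK */
	for (i = 0; i < sizeof(block); i++)
		block[i] ^= 0x36;	/* k ^ ipad */
	SHA1_Init(ipad_ctx);
	SHA1_Update(ipad_ctx, block, sizeof(block));	/* PHash1 */

	for (i = 0; i < sizeof(block); i++)
		block[i] ^= 0x36 ^ 0x5c;	/* flip to k ^ opad */
	SHA1_Init(opad_ctx);
	SHA1_Update(opad_ctx, block, sizeof(block));	/* PHash2 */
}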
 	case RTE_CRYPTO_AUTH_SHA224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
@@ -821,35 +830,37 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha224_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
-		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
-		sess->auth.block_size = SHA224_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-#else
-		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
-		sess->auth.engine = CCP_ENGINE_SHA;
-		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
-		sess->auth.ctx_len = CCP_SB_BYTES;
-		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
-		sess->auth.block_size = SHA224_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-		if (generate_partial_hash(sess))
-			return -1;
-#endif
+		if (sess->auth_opt) {
+			if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+			sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+			sess->auth.block_size = SHA224_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+		} else {
+			if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+			sess->auth.engine = CCP_ENGINE_SHA;
+			sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+			sess->auth.ctx_len = CCP_SB_BYTES;
+			sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+			sess->auth.block_size = SHA224_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			memset(sess->auth.pre_compute, 0,
+			       sess->auth.ctx_len << 1);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+			if (generate_partial_hash(sess))
+				return -1;
+		}
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
@@ -882,35 +893,37 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha256_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
-		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
-		sess->auth.block_size = SHA256_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-#else
-		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
-		sess->auth.engine = CCP_ENGINE_SHA;
-		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
-		sess->auth.ctx_len = CCP_SB_BYTES;
-		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
-		sess->auth.block_size = SHA256_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-		if (generate_partial_hash(sess))
-			return -1;
-#endif
+		if (sess->auth_opt) {
+			if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+			sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+			sess->auth.block_size = SHA256_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+		} else {
+			if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+			sess->auth.engine = CCP_ENGINE_SHA;
+			sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+			sess->auth.ctx_len = CCP_SB_BYTES;
+			sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+			sess->auth.block_size = SHA256_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			memset(sess->auth.pre_compute, 0,
+			       sess->auth.ctx_len << 1);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+			if (generate_partial_hash(sess))
+				return -1;
+		}
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
@@ -943,35 +956,39 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha384_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
-		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
-		sess->auth.block_size = SHA384_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-#else
-		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
-		sess->auth.engine = CCP_ENGINE_SHA;
-		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
-		sess->auth.ctx_len = CCP_SB_BYTES << 1;
-		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
-		sess->auth.block_size = SHA384_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-		if (generate_partial_hash(sess))
-			return -1;
-#endif
+		if (sess->auth_opt) {
+			if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+			sess->auth.offset = ((CCP_SB_BYTES << 1) -
+					     SHA384_DIGEST_SIZE);
+			sess->auth.block_size = SHA384_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+		} else {
+			if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+			sess->auth.engine = CCP_ENGINE_SHA;
+			sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+			sess->auth.ctx_len = CCP_SB_BYTES << 1;
+			sess->auth.offset = ((CCP_SB_BYTES << 1) -
+					     SHA384_DIGEST_SIZE);
+			sess->auth.block_size = SHA384_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			memset(sess->auth.pre_compute, 0,
+			       sess->auth.ctx_len << 1);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+			if (generate_partial_hash(sess))
+				return -1;
+		}
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
@@ -1004,35 +1021,39 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha512_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
-		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
-		sess->auth.block_size = SHA512_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-#else
-		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
-			return -1;
-		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
-		sess->auth.engine = CCP_ENGINE_SHA;
-		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
-		sess->auth.ctx_len = CCP_SB_BYTES << 1;
-		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
-		sess->auth.block_size = SHA512_BLOCK_SIZE;
-		sess->auth.key_length = auth_xform->key.length;
-		memset(sess->auth.key, 0, sess->auth.block_size);
-		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
-		rte_memcpy(sess->auth.key, auth_xform->key.data,
-			   auth_xform->key.length);
-		if (generate_partial_hash(sess))
-			return -1;
-#endif
+		if (sess->auth_opt) {
+			if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+			sess->auth.offset = ((CCP_SB_BYTES << 1) -
+					     SHA512_DIGEST_SIZE);
+			sess->auth.block_size = SHA512_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+		} else {
+			if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+				return -1;
+			sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+			sess->auth.engine = CCP_ENGINE_SHA;
+			sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+			sess->auth.ctx_len = CCP_SB_BYTES << 1;
+			sess->auth.offset = ((CCP_SB_BYTES << 1) -
+					     SHA512_DIGEST_SIZE);
+			sess->auth.block_size = SHA512_BLOCK_SIZE;
+			sess->auth.key_length = auth_xform->key.length;
+			memset(sess->auth.key, 0, sess->auth.block_size);
+			memset(sess->auth.pre_compute, 0,
+			       sess->auth.ctx_len << 1);
+			rte_memcpy(sess->auth.key, auth_xform->key.data,
+				   auth_xform->key.length);
+			if (generate_partial_hash(sess))
+				return -1;
+		}
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
@@ -1150,20 +1171,27 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
 int
 ccp_set_session_parameters(struct ccp_session *sess,
-			   const struct rte_crypto_sym_xform *xform)
+			   const struct rte_crypto_sym_xform *xform,
+			   struct ccp_private *internals)
 {
 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
 	const struct rte_crypto_sym_xform *auth_xform = NULL;
 	const struct rte_crypto_sym_xform *aead_xform = NULL;
 	int ret = 0;
 
+	sess->auth_opt = internals->auth_opt;
 	sess->cmd_id = ccp_get_cmd_id(xform);
 
 	switch (sess->cmd_id) {
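[Editor's note] ccp_set_session_parameters() now takes the device's ccp_private so each session can latch auth_opt (CPU-based authentication enabled per device). A sketch of how a call site adapts; the wrapper name and locals are illustrative, the real caller is the PMD's session-configure op:

/* Illustrative only: shows the new third argument being supplied
 * from the device private data. Driver-local headers assumed. */
#include <rte_cryptodev.h>
#include "ccp_crypto.h"
#include "ccp_pmd_private.h"

static int
ccp_session_cfg_sketch(struct rte_cryptodev *dev,
		       struct rte_crypto_sym_xform *xform,
		       struct ccp_session *sess)
{
	struct ccp_private *internals = dev->data->dev_private;

	/* per-device auth_opt now reaches each session */
	return ccp_set_session_parameters(sess, xform, internals);
}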
@@ -1259,22 +1287,16 @@ ccp_auth_slot(struct ccp_session *session)
 		count = 3;
 		/**< op + lsb passthrough cpy to/from*/
 		break;
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	case CCP_AUTH_ALGO_MD5_HMAC:
 		break;
-#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
-#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		count = 6;
-#endif
+		if (session->auth_opt == 0)
+			count = 6;
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
-#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		count = 7;
-#endif
 		/**
 		 * 1. Load PHash1 = H(k ^ ipad); to LSB
 		 * 2. generate IHash = H(hash on meassage with PHash1
@@ -1285,6 +1307,8 @@ ccp_auth_slot(struct ccp_session *session)
 		 * as init value);
 		 * 6. Retrieve HMAC output from LSB to host memory
 		 */
+		if (session->auth_opt == 0)
+			count = 7;
 		break;
 	case CCP_AUTH_ALGO_SHA3_224:
 	case CCP_AUTH_ALGO_SHA3_256:
@@ -1381,7 +1405,6 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 static uint8_t
 algo_select(int sessalgo,
 	    const EVP_MD **algo)
@@ -1495,7 +1518,6 @@ static int cpu_crypto_auth(struct ccp_qp *qp,
 	EVP_PKEY_free(pkey);
 	return 0;
 }
-#endif
 
 static void
 ccp_perform_passthru(struct ccp_passthru *pst,
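[Editor's note] cpu_crypto_auth(), no longer compile-gated above, computes the MAC on the host CPU through OpenSSL's EVP interface (the diff's tail shows its EVP_PKEY_free()). A sketch of the essential call sequence such a CPU path relies on; the name cpu_hmac_sketch and its signature are illustrative, not the driver's:

/* Sketch only: one-shot HMAC via EVP. The driver's routine instead takes
 * the session and a shared EVP_MD_CTX, and compares or writes the digest. */
#include <stdint.h>
#include <openssl/evp.h>

static int
cpu_hmac_sketch(const EVP_MD *md, const uint8_t *key, int klen,
		const uint8_t *msg, size_t mlen,
		uint8_t *mac, size_t *mac_len)
{
	EVP_MD_CTX *ctx = EVP_MD_CTX_create();
	EVP_PKEY *pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, key, klen);
	int ret = -1;

	if (ctx == NULL || pkey == NULL)
		goto out;
	if (EVP_DigestSignInit(ctx, NULL, md, NULL, pkey) != 1 ||
	    EVP_DigestSignUpdate(ctx, msg, mlen) != 1 ||
	    EVP_DigestSignFinal(ctx, mac, mac_len) != 1)
		goto out;
	ret = 0;	/* e.g. md = EVP_sha1() for CCP_AUTH_ALGO_SHA1_HMAC */
out:
	EVP_PKEY_free(pkey);
	EVP_MD_CTX_destroy(ctx);
	return ret;
}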
@@ -1561,20 +1583,25 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 	void *append_ptr;
 	uint8_t *addr;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 	addr = session->auth.pre_compute;
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	}
 	dest_addr_t = dest_addr;
 
 	/** Load PHash1 to LSB*/
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1654,7 +1681,10 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 
 	/** Load PHash2 to LSB*/
 	addr += session->auth.ctx_len;
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	if (iommu_mode == 2)
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	else
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1734,21 +1764,25 @@ ccp_perform_sha(struct rte_crypto_op *op,
 	void *append_ptr;
 	uint64_t auth_msg_bits;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
-
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 					session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)sha_ctx;
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						session->auth.ctx);
+	}
 
 	/** Passthru sha context*/
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
-						session->auth.ctx);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1823,11 +1857,11 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 	uint32_t tail;
 	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
@@ -1835,10 +1869,16 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
+					session->auth.pre_compute);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
+					session->auth.pre_compute);
+	}
 	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
-			*)session->auth.pre_compute);
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
@@ -1959,15 +1999,15 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 	struct ccp_session *session;
 	union ccp_function function;
 	struct ccp_desc *desc;
-	uint8_t *ctx_addr, *append_ptr;
+	uint8_t *ctx_addr = NULL, *append_ptr = NULL;
 	uint32_t tail;
 	phys_addr_t src_addr, dest_addr, ctx_paddr;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
@@ -1975,9 +2015,15 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+	}
+
 	ctx_addr = session->auth.sha3_ctx;
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
@@ -2031,12 +2077,12 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	phys_addr_t src_addr, dest_addr, key_addr;
 	int length, non_align_len;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
@@ -2051,7 +2097,13 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 
 		ctx_addr = session->auth.pre_compute;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
+
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2089,7 +2141,12 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	} else {
 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2183,7 +2240,7 @@ ccp_perform_aes(struct rte_crypto_op *op,
 	phys_addr_t src_addr, dest_addr, key_addr;
 	uint8_t *iv;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 	function.raw = 0;
@@ -2216,10 +2273,10 @@ ccp_perform_aes(struct rte_crypto_op *op,
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->cipher.data.offset);
 	if (likely(op->sym->m_dst != NULL))
-		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+		dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
 						op->sym->cipher.data.offset);
 	else
 		dest_addr = src_addr;
@@ -2271,7 +2328,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	uint8_t *iv;
 	phys_addr_t src_addr, dest_addr, key_addr;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
@@ -2283,8 +2340,12 @@ ccp_perform_3des(struct rte_crypto_op *op,
 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
 			   iv, session->iv.length);
-
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *) lsb_buf);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *) lsb_buf);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2298,16 +2359,19 @@ ccp_perform_3des(struct rte_crypto_op *op,
 		return -ENOTSUP;
 	}
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->cipher.data.offset);
 	if (unlikely(op->sym->m_dst != NULL))
 		dest_addr =
-			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+			rte_pktmbuf_iova_offset(op->sym->m_dst,
 						op->sym->cipher.data.offset);
 	else
 		dest_addr = src_addr;
 
-	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+	if (iommu_mode == 2)
+		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+	else
+		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
@@ -2374,16 +2438,16 @@ ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
 	phys_addr_t digest_dest_addr;
 	int length, non_align_len;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					 ccp_cryptodev_driver_id);
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
 	key_addr = session->cipher.key_phys;
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->aead.data.offset);
 	if (unlikely(op->sym->m_dst != NULL))
-		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+		dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
 						op->sym->aead.data.offset);
 	else
 		dest_addr = src_addr;
@@ -2541,7 +2605,7 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 	int result = 0;
 	struct ccp_session *session;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					 ccp_cryptodev_driver_id);
@@ -2579,7 +2643,7 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 	int result = 0;
 	struct ccp_session *session;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					 ccp_cryptodev_driver_id);
@@ -2592,26 +2656,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	case CCP_AUTH_ALGO_MD5_HMAC:
+		if (session->auth_opt == 0)
+			result = -1;
 		break;
-#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		break;
-#endif
-		result = ccp_perform_hmac(op, cmd_q);
-		b_info->desccnt += 6;
+		if (session->auth_opt == 0) {
+			result = ccp_perform_hmac(op, cmd_q);
+			b_info->desccnt += 6;
+		}
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-		break;
-#endif
-		result = ccp_perform_hmac(op, cmd_q);
-		b_info->desccnt += 7;
+		if (session->auth_opt == 0) {
+			result = ccp_perform_hmac(op, cmd_q);
+			b_info->desccnt += 7;
+		}
 		break;
 	case CCP_AUTH_ALGO_SHA3_224:
 	case CCP_AUTH_ALGO_SHA3_256:
@@ -2651,7 +2713,7 @@ ccp_crypto_aead(struct rte_crypto_op *op,
 	int result = 0;
 	struct ccp_session *session;
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					 ccp_cryptodev_driver_id);
@@ -2677,40 +2739,46 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 		       struct rte_crypto_op **op,
 		       struct ccp_queue *cmd_q,
 		       uint16_t nb_ops,
-		       int slots_req)
+		       uint16_t total_nb_ops,
+		       int slots_req,
+		       uint16_t b_idx)
 {
 	int i, result = 0;
 	struct ccp_batch_info *b_info;
 	struct ccp_session *session;
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	EVP_MD_CTX *auth_ctx = NULL;
-#endif
 
 	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
 		CCP_LOG_ERR("batch info allocation failed");
 		return 0;
 	}
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+
 	auth_ctx = EVP_MD_CTX_create();
 	if (unlikely(!auth_ctx)) {
 		CCP_LOG_ERR("Unable to create auth ctx");
 		return 0;
 	}
 	b_info->auth_ctr = 0;
-#endif
+
 	/* populate batch info necessary for dequeue */
 	b_info->op_idx = 0;
+	b_info->b_idx = 0;
 	b_info->lsb_buf_idx = 0;
 	b_info->desccnt = 0;
 	b_info->cmd_q = cmd_q;
-	b_info->lsb_buf_phys =
-		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	if (iommu_mode == 2)
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
+	else
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+
 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
 
 	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
 					 Q_DESC_SIZE);
-	for (i = 0; i < nb_ops; i++) {
-		session = (struct ccp_session *)get_session_private_data(
+	for (i = b_idx; i < (nb_ops+b_idx); i++) {
+		session = (struct ccp_session *)get_sym_session_private_data(
 						 op[i]->sym->session,
 						 ccp_cryptodev_driver_id);
 		switch (session->cmd_id) {
@@ -2718,12 +2786,12 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
 			break;
 		case CCP_CMD_AUTH:
-			result = ccp_crypto_auth(op[i], cmd_q, b_info);
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-			b_info->auth_ctr++;
-			result = cpu_crypto_auth(qp, op[i],
-						 session, auth_ctx);
-#endif
+			if (session->auth_opt) {
+				b_info->auth_ctr++;
+				result = cpu_crypto_auth(qp, op[i],
+							 session, auth_ctx);
+			} else
+				result = ccp_crypto_auth(op[i], cmd_q, b_info);
 			break;
 		case CCP_CMD_CIPHER_HASH:
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2732,13 +2800,15 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
 			break;
 		case CCP_CMD_HASH_CIPHER:
-			result = ccp_crypto_auth(op[i], cmd_q, b_info);
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-			result = cpu_crypto_auth(qp, op[i],
-						 session, auth_ctx);
-			if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
-				continue;
-#endif
+			if (session->auth_opt) {
+				result = cpu_crypto_auth(qp, op[i],
+							 session, auth_ctx);
+				if (op[i]->status !=
+				    RTE_CRYPTO_OP_STATUS_SUCCESS)
+					CCP_LOG_ERR("RTE_CRYPTO_OP_STATUS_AUTH_FAILED");
+			} else
+				result = ccp_crypto_auth(op[i], cmd_q, b_info);
+
 			if (result)
 				break;
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2759,6 +2829,8 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 	}
 
 	b_info->opcnt = i;
+	b_info->b_idx = b_idx;
+	b_info->total_nb_ops = total_nb_ops;
 	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
 					 Q_DESC_SIZE);
@@ -2772,10 +2844,8 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 
 	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
 
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	EVP_MD_CTX_destroy(auth_ctx);
-#endif
-	return i;
+	return i-b_idx;
 }
 
 static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
@@ -2786,7 +2856,7 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
 	int offset, digest_offset;
 	uint8_t digest_le[64];
 
-	session = (struct ccp_session *)get_session_private_data(
+	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
@@ -2843,11 +2913,7 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
 }
 
 static int
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 ccp_prepare_ops(struct ccp_qp *qp,
-#else
-ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
-#endif
 		struct rte_crypto_op **op_d,
 		struct ccp_batch_info *b_info,
 		uint16_t nb_ops)
@@ -2855,7 +2921,6 @@ ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
 	int i, min_ops;
 	struct ccp_session *session;
 
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	EVP_MD_CTX *auth_ctx = NULL;
 
 	auth_ctx = EVP_MD_CTX_create();
@@ -2863,12 +2928,11 @@ ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
 		CCP_LOG_ERR("Unable to create auth ctx");
 		return 0;
 	}
-#endif
 	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
 
-	for (i = 0; i < min_ops; i++) {
-		op_d[i] = b_info->op[b_info->op_idx++];
-		session = (struct ccp_session *)get_session_private_data(
+	for (i = b_info->b_idx; i < min_ops; i++) {
+		op_d[i] = b_info->op[b_info->b_idx + b_info->op_idx++];
+		session = (struct ccp_session *)get_sym_session_private_data(
 						 op_d[i]->sym->session,
 						ccp_cryptodev_driver_id);
 		switch (session->cmd_id) {
@@ -2876,24 +2940,21 @@ ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 			break;
 		case CCP_CMD_AUTH:
-#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-			ccp_auth_dq_prepare(op_d[i]);
-#endif
+			if (session->auth_opt == 0)
+				ccp_auth_dq_prepare(op_d[i]);
 			break;
 		case CCP_CMD_CIPHER_HASH:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-			cpu_crypto_auth(qp, op_d[i],
-					session, auth_ctx);
-#else
-			ccp_auth_dq_prepare(op_d[i]);
-#endif
+			if (session->auth_opt)
+				cpu_crypto_auth(qp, op_d[i],
+						session, auth_ctx);
+			else
+				ccp_auth_dq_prepare(op_d[i]);
 			break;
 		case CCP_CMD_HASH_CIPHER:
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
-			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#else
-			ccp_auth_dq_prepare(op_d[i]);
-#endif
+			if (session->auth_opt)
+				op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			else
+				ccp_auth_dq_prepare(op_d[i]);
 			break;
 		case CCP_CMD_COMBINED:
 			ccp_auth_dq_prepare(op_d[i]);
@@ -2903,9 +2964,7 @@ ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
 		}
 	}
 
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	EVP_MD_CTX_destroy(auth_ctx);
-#endif
 	b_info->opcnt -= min_ops;
 	return min_ops;
 }
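[Editor's note] The new b_idx/total_nb_ops plumbing lets one logical burst be pushed in slices: each enqueue call starts at op[b_idx], returns i - b_idx ops consumed, and the batch info carries total_nb_ops back to the dequeue side. A sketch of the calling pattern this implies; the loop is an assumption drawn from this diff, not the PMD's actual burst code:

/* Sketch only: slot accounting and queue selection are omitted. */
static uint16_t
ccp_enqueue_sketch(struct ccp_qp *qp, struct ccp_queue *cmd_q,
		   struct rte_crypto_op **ops, uint16_t nb_ops, int slots_req)
{
	uint16_t done = 0;

	while (done < nb_ops) {
		uint16_t n = process_ops_to_enqueue(qp, ops, cmd_q,
						    nb_ops - done, nb_ops,
						    slots_req, done);
		if (n == 0)
			break;	/* out of descriptors or batch infos */
		done += n;
	}
	return done;
}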
@@ -2913,7 +2972,8 @@ ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
 int
 process_ops_to_dequeue(struct ccp_qp *qp,
 		       struct rte_crypto_op **op,
-		       uint16_t nb_ops)
+		       uint16_t nb_ops,
+		       uint16_t *total_nb_ops)
 {
 	struct ccp_batch_info *b_info;
 	uint32_t cur_head_offset;
@@ -2925,10 +2985,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 	} else if (rte_ring_dequeue(qp->processed_pkts, (void **)&b_info))
 		return 0;
-#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+
 	if (b_info->auth_ctr == b_info->opcnt)
 		goto success;
-#endif
+	*total_nb_ops = b_info->total_nb_ops;
 	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
 				       CMD_Q_HEAD_LO_BASE);
@@ -2938,7 +2998,7 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 			qp->b_info = b_info;
 			return 0;
 		}
-	} else {
+	} else if (b_info->tail_offset != b_info->head_offset) {
 		if ((cur_head_offset >= b_info->head_offset) ||
 		    (cur_head_offset < b_info->tail_offset)) {
 			qp->b_info = b_info;
 			return 0;
 		}
@@ -2948,6 +3008,7 @@
 
 success:
+	*total_nb_ops = b_info->total_nb_ops;
 	nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
 	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
 	b_info->desccnt = 0;