cryptodev: rename field in vector struct
author Hemant Agrawal <hemant.agrawal@nxp.com>
Wed, 13 Oct 2021 19:00:18 +0000 (00:30 +0530)
committer Akhil Goyal <gakhil@marvell.com>
Sun, 17 Oct 2021 17:31:15 +0000 (19:31 +0200)
This patch renames the sgl field to src_sgl in struct rte_crypto_sym_vec
to help differentiate between the source and destination SGLs.
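
For context, a minimal sketch of how a caller of the CPU crypto / raw
data-path API fills the renamed field (hypothetical helper; session setup
and buffer allocation are assumed to happen elsewhere, and the per-segment
SGL itself is unchanged by this patch):

    #include <rte_crypto_sym.h>

    /* Hypothetical helper: describe one operation over a source SGL. */
    static void
    fill_sym_vec(struct rte_crypto_sym_vec *symvec,
                 struct rte_crypto_sgl *src_sgl,
                 struct rte_crypto_va_iova_ptr *iv,
                 struct rte_crypto_va_iova_ptr *digest,
                 struct rte_crypto_va_iova_ptr *aad,
                 int32_t *status)
    {
            symvec->num = 1;            /* one operation in this vector */
            symvec->src_sgl = src_sgl;  /* was symvec->sgl before this patch */
            symvec->iv = iv;
            symvec->digest = digest;
            symvec->aad = aad;
            symvec->status = status;
    }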

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
app/test/test_cryptodev.c
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
drivers/crypto/qat/qat_sym_hw_dp.c
lib/cryptodev/rte_crypto_sym.h
lib/ipsec/misc.h

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 65b64e1..4778daf 100644
@@ -232,7 +232,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
        digest.va = NULL;
        sgl.vec = data_vec;
        vec.num = 1;
-       vec.sgl = &sgl;
+       vec.src_sgl = &sgl;
        vec.iv = &cipher_iv;
        vec.digest = &digest;
        vec.aad = &aad_auth_iv;
@@ -396,7 +396,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 
        sgl.vec = vec;
        sgl.num = n;
-       symvec.sgl = &sgl;
+       symvec.src_sgl = &sgl;
        symvec.iv = &iv_ptr;
        symvec.digest = &digest_ptr;
        symvec.aad = &aad_ptr;
@@ -442,7 +442,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 
        sgl.vec = vec;
        sgl.num = n;
-       symvec.sgl = &sgl;
+       symvec.src_sgl = &sgl;
        symvec.iv = &iv_ptr;
        symvec.digest = &digest_ptr;
        symvec.status = &st;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 330aad8..d036882 100644
@@ -535,7 +535,7 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
        processed = 0;
        for (i = 0; i < vec->num; ++i) {
                aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-                       &vec->sgl[i], vec->iv[i].va,
+                       &vec->src_sgl[i], vec->iv[i].va,
                        vec->aad[i].va);
                vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
                        gdata_ctx, vec->digest[i].va);
@@ -554,7 +554,7 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
        processed = 0;
        for (i = 0; i < vec->num; ++i) {
                aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-                       &vec->sgl[i], vec->iv[i].va,
+                       &vec->src_sgl[i], vec->iv[i].va,
                        vec->aad[i].va);
                 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
                        gdata_ctx, vec->digest[i].va);
@@ -572,13 +572,13 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
 
        processed = 0;
        for (i = 0; i < vec->num; ++i) {
-               if (vec->sgl[i].num != 1) {
+               if (vec->src_sgl[i].num != 1) {
                        vec->status[i] = ENOTSUP;
                        continue;
                }
 
                aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-                       &vec->sgl[i], vec->iv[i].va);
+                       &vec->src_sgl[i], vec->iv[i].va);
                vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
                        gdata_ctx, vec->digest[i].va);
                processed += (vec->status[i] == 0);
@@ -595,13 +595,13 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
 
        processed = 0;
        for (i = 0; i < vec->num; ++i) {
-               if (vec->sgl[i].num != 1) {
+               if (vec->src_sgl[i].num != 1) {
                        vec->status[i] = ENOTSUP;
                        continue;
                }
 
                aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-                       &vec->sgl[i], vec->iv[i].va);
+                       &vec->src_sgl[i], vec->iv[i].va);
                vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
                        gdata_ctx, vec->digest[i].va);
                processed += (vec->status[i] == 0);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 60963a8..2419adc 100644
@@ -2002,14 +2002,14 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
        for (i = 0, j = 0, k = 0; i != vec->num; i++) {
 
 
-               ret = check_crypto_sgl(sofs, vec->sgl + i);
+               ret = check_crypto_sgl(sofs, vec->src_sgl + i);
                if (ret != 0) {
                        vec->status[i] = ret;
                        continue;
                }
 
-               buf = vec->sgl[i].vec[0].base;
-               len = vec->sgl[i].vec[0].len;
+               buf = vec->src_sgl[i].vec[0].base;
+               len = vec->src_sgl[i].vec[0].len;
 
                job = IMB_GET_NEXT_JOB(mb_mgr);
                if (job == NULL) {
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 36d11e0..12825e4 100644
@@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-                       vec->sgl[i].num);
+               data_len = qat_sym_dp_parse_data_vec(qp, req,
+                       vec->src_sgl[i].vec,
+                       vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-                       vec->sgl[i].num);
+               data_len = qat_sym_dp_parse_data_vec(qp, req,
+                       vec->src_sgl[i].vec,
+                       vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-                       vec->sgl[i].num);
+               data_len = qat_sym_dp_parse_data_vec(qp, req,
+                       vec->src_sgl[i].vec,
+                       vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-               if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
-                       vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
-                               &vec->auth_iv[i], ofs, (uint32_t)data_len)))
+               if (unlikely(enqueue_one_chain_job(ctx, req,
+                       vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                       &vec->iv[i], &vec->digest[i],
+                       &vec->auth_iv[i], ofs, (uint32_t)data_len)))
                        break;
 
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-                       vec->sgl[i].num);
+               data_len = qat_sym_dp_parse_data_vec(qp, req,
+                       vec->src_sgl[i].vec,
+                       vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 1106ad6..f8cb2cc 100644
@@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
        /** number of operations to perform */
        uint32_t num;
        /** array of SGL vectors */
-       struct rte_crypto_sgl *sgl;
+       struct rte_crypto_sgl *src_sgl;
        /** array of pointers to cipher IV */
        struct rte_crypto_va_iova_ptr *iv;
        /** array of pointers to digest */
diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
index 79b9a20..58ff538 100644
@@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
                /* not enough space in vec[] to hold all segments */
                if (vcnt < 0) {
                        /* fill the request structure */
-                       symvec.sgl = &vecpkt[j];
+                       symvec.src_sgl = &vecpkt[j];
                        symvec.iv = &iv[j];
                        symvec.digest = &dgst[j];
                        symvec.aad = &aad[j];
@@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
        }
 
        /* fill the request structure */
-       symvec.sgl = &vecpkt[j];
+       symvec.src_sgl = &vecpkt[j];
        symvec.iv = &iv[j];
        symvec.aad = &aad[j];
        symvec.digest = &dgst[j];