digest.va = NULL;
sgl.vec = data_vec;
vec.num = 1;
- vec.sgl = &sgl;
+ vec.src_sgl = &sgl;
vec.iv = &cipher_iv;
vec.digest = &digest;
vec.aad = &aad_auth_iv;
sgl.vec = vec;
sgl.num = n;
- symvec.sgl = &sgl;
+ symvec.src_sgl = &sgl;
symvec.iv = &iv_ptr;
symvec.digest = &digest_ptr;
symvec.aad = &aad_ptr;
sgl.vec = vec;
sgl.num = n;
- symvec.sgl = &sgl;
+ symvec.src_sgl = &sgl;
symvec.iv = &iv_ptr;
symvec.digest = &digest_ptr;
symvec.status = &st;
processed = 0;
for (i = 0; i < vec->num; ++i) {
aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i].va,
+ &vec->src_sgl[i], vec->iv[i].va,
vec->aad[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
gdata_ctx, vec->digest[i].va);
processed = 0;
for (i = 0; i < vec->num; ++i) {
aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i].va,
+ &vec->src_sgl[i], vec->iv[i].va,
vec->aad[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
gdata_ctx, vec->digest[i].va);
processed = 0;
for (i = 0; i < vec->num; ++i) {
- if (vec->sgl[i].num != 1) {
+ if (vec->src_sgl[i].num != 1) {
vec->status[i] = ENOTSUP;
continue;
}
aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i].va);
+ &vec->src_sgl[i], vec->iv[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
gdata_ctx, vec->digest[i].va);
processed += (vec->status[i] == 0);
processed = 0;
for (i = 0; i < vec->num; ++i) {
- if (vec->sgl[i].num != 1) {
+ if (vec->src_sgl[i].num != 1) {
vec->status[i] = ENOTSUP;
continue;
}
aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i].va);
+ &vec->src_sgl[i], vec->iv[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
gdata_ctx, vec->digest[i].va);
processed += (vec->status[i] == 0);
for (i = 0, j = 0, k = 0; i != vec->num; i++) {
- ret = check_crypto_sgl(sofs, vec->sgl + i);
+ ret = check_crypto_sgl(sofs, vec->src_sgl + i);
if (ret != 0) {
vec->status[i] = ret;
continue;
}
- buf = vec->sgl[i].vec[0].base;
- len = vec->sgl[i].vec[0].len;
+ buf = vec->src_sgl[i].vec[0].base;
+ len = vec->src_sgl[i].vec[0].len;
job = IMB_GET_NEXT_JOB(mb_mgr);
if (job == NULL) {
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
- vec->sgl[i].num);
+ data_len = qat_sym_dp_parse_data_vec(qp, req,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num);
if (unlikely(data_len < 0))
break;
req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
- vec->sgl[i].num);
+ data_len = qat_sym_dp_parse_data_vec(qp, req,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num);
if (unlikely(data_len < 0))
break;
req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
- vec->sgl[i].num);
+ data_len = qat_sym_dp_parse_data_vec(qp, req,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num);
if (unlikely(data_len < 0))
break;
req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
- if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
- vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
- &vec->auth_iv[i], ofs, (uint32_t)data_len)))
+ if (unlikely(enqueue_one_chain_job(ctx, req,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ &vec->iv[i], &vec->digest[i],
+ &vec->auth_iv[i], ofs, (uint32_t)data_len)))
break;
tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
- vec->sgl[i].num);
+ data_len = qat_sym_dp_parse_data_vec(qp, req,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num);
if (unlikely(data_len < 0))
break;
req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
/** number of operations to perform */
uint32_t num;
/** array of SGL vectors */
- struct rte_crypto_sgl *sgl;
+ struct rte_crypto_sgl *src_sgl;
/** array of pointers to cipher IV */
struct rte_crypto_va_iova_ptr *iv;
/** array of pointers to digest */
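For illustration only (not part of the patch): a minimal caller-side sketch of how an application fills a struct rte_crypto_sym_vec after this rename, using only the fields shown above. The data buffer (plaintext, plaintext_len) and the buffers behind the IV/digest/AAD va pointers are assumed to be set up elsewhere before the vector is handed to a CPU-crypto processing call.

	struct rte_crypto_vec data_vec[1];
	struct rte_crypto_sgl sgl;
	struct rte_crypto_va_iova_ptr cipher_iv, digest, aad;
	struct rte_crypto_sym_vec symvec;
	int32_t st;

	/* one contiguous source segment */
	data_vec[0].base = plaintext;
	data_vec[0].len = plaintext_len;
	sgl.vec = data_vec;
	sgl.num = 1;

	/* cipher_iv.va, digest.va and aad.va must point at the real buffers */
	symvec.num = 1;
	symvec.src_sgl = &sgl;	/* was symvec.sgl before this rename */
	symvec.iv = &cipher_iv;
	symvec.digest = &digest;
	symvec.aad = &aad;
	symvec.status = &st;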
/* not enough space in vec[] to hold all segments */
if (vcnt < 0) {
/* fill the request structure */
- symvec.sgl = &vecpkt[j];
+ symvec.src_sgl = &vecpkt[j];
symvec.iv = &iv[j];
symvec.digest = &dgst[j];
symvec.aad = &aad[j];
}
/* fill the request structure */
- symvec.sgl = &vecpkt[j];
+ symvec.src_sgl = &vecpkt[j];
symvec.iv = &iv[j];
symvec.aad = &aad[j];
symvec.digest = &dgst[j];