process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
int32_t n, st;
- void *iv;
struct rte_crypto_sym_op *sop;
union rte_crypto_sym_ofs ofs;
struct rte_crypto_sgl sgl;
struct rte_crypto_sym_vec symvec;
+ struct rte_crypto_va_iova_ptr iv_ptr, aad_ptr, digest_ptr;
struct rte_crypto_vec vec[UINT8_MAX];
sop = op->sym;
sgl.vec = vec;
sgl.num = n;
symvec.sgl = &sgl;
- iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
- symvec.iv = &iv;
- symvec.aad = (void **)&sop->aead.aad.data;
- symvec.digest = (void **)&sop->aead.digest.data;
+ symvec.iv = &iv_ptr;
+ symvec.digest = &digest_ptr;
+ symvec.aad = &aad_ptr;
symvec.status = &st;
symvec.num = 1;
+ /* for CPU crypto the IOVA address is not required */
+ iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ digest_ptr.va = (void *)sop->aead.digest.data;
+ aad_ptr.va = (void *)sop->aead.aad.data;
+
ofs.raw = 0;
n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sop->session, ofs,
process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
{
int32_t n, st;
- void *iv;
struct rte_crypto_sym_op *sop;
union rte_crypto_sym_ofs ofs;
struct rte_crypto_sgl sgl;
struct rte_crypto_sym_vec symvec;
+ struct rte_crypto_va_iova_ptr iv_ptr, digest_ptr;
struct rte_crypto_vec vec[UINT8_MAX];
sop = op->sym;
sgl.vec = vec;
sgl.num = n;
symvec.sgl = &sgl;
- iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
- symvec.iv = &iv;
- symvec.aad = (void **)&sop->aead.aad.data;
- symvec.digest = (void **)&sop->auth.digest.data;
+ symvec.iv = &iv_ptr;
+ symvec.digest = &digest_ptr;
symvec.status = &st;
symvec.num = 1;
+ iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ digest_ptr.va = (void *)sop->auth.digest.data;
+
ofs.raw = 0;
ofs.ofs.cipher.head = sop->cipher.data.offset - sop->auth.data.offset;
ofs.ofs.cipher.tail = (sop->auth.data.offset + sop->auth.data.length) -
descriptors of performed operations (``struct rte_crypto_sgl``). Each instance
of ``struct rte_crypto_sgl`` consists of a number of segments and a pointer to
an array of segment descriptors ``struct rte_crypto_vec``;
-- pointers to arrays of size ``num`` containing IV, AAD and digest information,
+- pointers to arrays of size ``num`` containing IV, AAD and digest information
+ in the ``cpu_crypto`` sub-structure,
- pointer to an array of size ``num`` where status information will be stored
for each operation.
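As a minimal sketch, these fields could be filled for a single in-place AEAD
operation as follows; the helper name ``fill_sym_vec_single`` and the
single-segment buffer layout are assumptions made for illustration, and only
virtual addresses are set since the CPU crypto path does not require IOVA
addresses:

.. code-block:: c

   #include <rte_crypto_sym.h>

   /* Illustrative helper: describe one single-segment AEAD operation. */
   static void
   fill_sym_vec_single(struct rte_crypto_sym_vec *symvec,
           struct rte_crypto_sgl *sgl, struct rte_crypto_vec *seg,
           struct rte_crypto_va_iova_ptr *iv,
           struct rte_crypto_va_iova_ptr *aad,
           struct rte_crypto_va_iova_ptr *digest,
           int32_t *status, void *data, uint32_t len,
           void *iv_va, void *aad_va, void *digest_va)
   {
           /* one contiguous data segment */
           seg->base = data;
           seg->len = len;
           sgl->vec = seg;
           sgl->num = 1;

           /* CPU crypto only needs the virtual addresses */
           iv->va = iv_va;
           aad->va = aad_va;
           digest->va = digest_va;

           symvec->num = 1;         /* number of operations */
           symvec->sgl = sgl;       /* one SGL descriptor per operation */
           symvec->iv = iv;         /* arrays of size num (here 1) */
           symvec->aad = aad;
           symvec->digest = digest;
           symvec->status = status; /* per-operation status output */
   }

The populated vector can then be passed to
``rte_cryptodev_sym_cpu_crypto_process()``, as in the test code above.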
* vhost: Moved vDPA APIs from experimental to stable.
+* cryptodev: The structure ``rte_crypto_sym_vec`` is updated to support both
+  cpu_crypto synchronous operations and asynchronous raw data-path APIs.
+
* scheduler: Renamed functions ``rte_cryptodev_scheduler_slave_attach``,
``rte_cryptodev_scheduler_slave_detach`` and
``rte_cryptodev_scheduler_slaves_get`` to
processed = 0;
for (i = 0; i < vec->num; ++i) {
aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i], vec->aad[i]);
+ &vec->sgl[i], vec->iv[i].va,
+ vec->aad[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
- gdata_ctx, vec->digest[i]);
+ gdata_ctx, vec->digest[i].va);
processed += (vec->status[i] == 0);
}
processed = 0;
for (i = 0; i < vec->num; ++i) {
aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i], vec->aad[i]);
+ &vec->sgl[i], vec->iv[i].va,
+ vec->aad[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
- gdata_ctx, vec->digest[i]);
+ gdata_ctx, vec->digest[i].va);
processed += (vec->status[i] == 0);
}
}
aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i]);
+ &vec->sgl[i], vec->iv[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
- gdata_ctx, vec->digest[i]);
+ gdata_ctx, vec->digest[i].va);
processed += (vec->status[i] == 0);
}
}
aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
- &vec->sgl[i], vec->iv[i]);
+ &vec->sgl[i], vec->iv[i].va);
vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
- gdata_ctx, vec->digest[i]);
+ gdata_ctx, vec->digest[i].va);
processed += (vec->status[i] == 0);
}
for (i = 0, k = 0; i != vec->num; i++) {
if (vec->status[i] == 0) {
- memcpy(vec->digest[i], dgst[i], len);
+ memcpy(vec->digest[i].va, dgst[i], len);
k++;
}
}
for (i = 0, k = 0; i != vec->num; i++) {
if (vec->status[i] == 0) {
- if (memcmp(vec->digest[i], dgst[i], len) != 0)
+ if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
vec->status[i] = EBADMSG;
else
k++;
}
/* Submit job for processing */
- set_cpu_mb_job_params(job, s, sofs, buf, len,
- vec->iv[i], vec->aad[i], tmp_dgst[i],
- &vec->status[i]);
+ set_cpu_mb_job_params(job, s, sofs, buf, len, vec->iv[i].va,
+ vec->aad[i].va, tmp_dgst[i], &vec->status[i]);
job = submit_sync_job(mb_mgr);
j++;
};
/**
- * Synchronous operation descriptor.
- * Supposed to be used with CPU crypto API call.
+ * Crypto virtual and IOVA address descriptor, used to describe a cryptographic
+ * data buffer without length information; the length is normally predefined
+ * during session creation.
+ */
+struct rte_crypto_va_iova_ptr {
+ void *va;
+ rte_iova_t iova;
+};
+
+/**
+ * Raw data operation descriptor.
+ * Intended to be used with the synchronous CPU crypto API or the asynchronous
+ * raw data-path API.
*/
struct rte_crypto_sym_vec {
+ /** number of operations to perform */
+ uint32_t num;
/** array of SGL vectors */
struct rte_crypto_sgl *sgl;
- /** array of pointers to IV */
- void **iv;
- /** array of pointers to AAD */
- void **aad;
+ /** array of pointers to cipher IV */
+ struct rte_crypto_va_iova_ptr *iv;
/** array of pointers to digest */
- void **digest;
+ struct rte_crypto_va_iova_ptr *digest;
+
+ __extension__
+ union {
+ /** array of pointers to auth IV, used for chained operations */
+ struct rte_crypto_va_iova_ptr *auth_iv;
+ /** array of pointers to AAD, used for AEAD operations */
+ struct rte_crypto_va_iova_ptr *aad;
+ };
+
/**
* array of statuses for each operation:
- * - 0 on success
- * - errno on error
+ * - 0 on success
+ * - errno on error
*/
int32_t *status;
- /** number of operations to perform */
- uint32_t num;
};
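As a minimal sketch, assuming an illustrative helper name and offset, a
``rte_crypto_va_iova_ptr`` element could be filled from an mbuf as shown
below; the IOVA is only expected to be needed by the raw data-path APIs,
while the CPU crypto path uses the virtual address alone:

#include <rte_mbuf.h>
#include <rte_crypto_sym.h>

/* Illustrative only: point one descriptor at mbuf data at a given offset. */
static inline void
fill_va_iova_from_mbuf(struct rte_crypto_va_iova_ptr *p,
		struct rte_mbuf *m, uint32_t off)
{
	p->va = rte_pktmbuf_mtod_offset(m, void *, off);
	/* needed for the raw data-path APIs, unused by CPU crypto */
	p->iova = rte_pktmbuf_iova_offset(m, off);
}

For an AEAD operation the ``aad`` member of the union would point at
descriptors filled this way, while a chained cipher/auth operation would use
``auth_iv`` instead.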
/**
struct rte_ipsec_sa *sa;
struct replay_sqn *rsn;
union sym_op_data icv;
- void *iv[num];
- void *aad[num];
- void *dgst[num];
+ struct rte_crypto_va_iova_ptr iv[num];
+ struct rte_crypto_va_iova_ptr aad[num];
+ struct rte_crypto_va_iova_ptr dgst[num];
uint32_t dr[num];
uint32_t l4ofs[num];
uint32_t clen[num];
l4ofs + k, rc, ivbuf[k]);
/* fill iv, digest and aad */
- iv[k] = ivbuf[k];
- aad[k] = icv.va + sa->icv_len;
- dgst[k++] = icv.va;
+ iv[k].va = ivbuf[k];
+ aad[k].va = icv.va + sa->icv_len;
+ dgst[k++].va = icv.va;
} else {
dr[i - k] = i;
rte_errno = -rc;
uint32_t i, k, n;
uint32_t l2, l3;
union sym_op_data icv;
- void *iv[num];
- void *aad[num];
- void *dgst[num];
+ struct rte_crypto_va_iova_ptr iv[num];
+ struct rte_crypto_va_iova_ptr aad[num];
+ struct rte_crypto_va_iova_ptr dgst[num];
uint32_t dr[num];
uint32_t l4ofs[num];
uint32_t clen[num];
ivbuf[k]);
/* fill iv, digest and aad */
- iv[k] = ivbuf[k];
- aad[k] = icv.va + sa->icv_len;
- dgst[k++] = icv.va;
+ iv[k].va = ivbuf[k];
+ aad[k].va = icv.va + sa->icv_len;
+ dgst[k++].va = icv.va;
} else {
dr[i - k] = i;
rte_errno = -rc;
static inline void
cpu_crypto_bulk(const struct rte_ipsec_session *ss,
union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
- void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
+ struct rte_crypto_va_iova_ptr iv[],
+ struct rte_crypto_va_iova_ptr aad[],
+ struct rte_crypto_va_iova_ptr dgst[], uint32_t l4ofs[],
uint32_t clen[], uint32_t num)
{
uint32_t i, j, n;
/* fill the request structure */
symvec.sgl = &vecpkt[j];
symvec.iv = &iv[j];
- symvec.aad = &aad[j];
symvec.digest = &dgst[j];
+ symvec.aad = &aad[j];
symvec.status = &st[j];
symvec.num = i - j;