/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
- struct aesni_mb_session *sess;
+ struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
sess = (struct aesni_mb_session *)crypto_op->session->_private;
} else {
- struct rte_cryptodev_session *c_sess = NULL;
+ void *_sess = NULL;
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct aesni_mb_session *)c_sess->_private;
+ sess = (struct aesni_mb_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0))
- return NULL;
+ sess, crypto_op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ }
}
return sess;
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ sizeof(get_digest_byte_length(job->hash_alg)));
+
} else {
job->auth_tag_output = c_op->digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
if (m)
rte_ring_enqueue(qp->processed_pkts, (void *)m);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
sess = get_session(qp, &ol->op.crypto);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
*/
job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ qp->stats.enqueued_count += handle_completed_jobs(qp, job);
return i;
}
nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
(void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}