#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_cryptodev_vdev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
-/**
- * Global static parameter used to create a unique name for each AES-NI multi
- * buffer crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_MB_PMD,
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-typedef void (*hash_one_block_t)(void *data, void *digest);
-typedef void (*aes_keyexp_t)(void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
+		void *dec_exp_keys);
/**
* Calculate the authentication pre-computes
}
/** Get xform chain order */
-static int
+static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
- /*
- * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
- * operations, all other options are invalid, so we must have exactly
- * 2 xform structs chained together
- */
- if (xform->next == NULL || xform->next->next != NULL)
- return -1;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return HASH_CIPHER;
+ if (xform == NULL)
+ return AESNI_MB_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_CIPHER_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return AESNI_MB_OP_CIPHER_HASH;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return CIPHER_HASH;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_HASH_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return AESNI_MB_OP_HASH_CIPHER;
+ }
- return -1;
+ return AESNI_MB_OP_NOT_SUPPORTED;
}
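/*
 * Editorial sketch (not part of this patch): how an application-built
 * xform chain maps onto the enum returned above. A cipher xform whose
 * ->next points at an auth xform classifies as AESNI_MB_OP_CIPHER_HASH;
 * the helper name below is hypothetical.
 */
static enum aesni_mb_operation
example_classify_cipher_hash(void)
{
	struct rte_crypto_sym_xform auth = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
	};
	struct rte_crypto_sym_xform cipher = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth,
	};

	/* evaluates to AESNI_MB_OP_CIPHER_HASH */
	return aesni_mb_get_chain_order(&cipher);
}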
/** Set session authentication parameters */
static int
-aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
+ if (xform == NULL) {
+ sess->auth.algo = NULL_HASH;
+ return 0;
+ }
+
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+
/* Set Authentication Parameters */
if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
sess->auth.algo = AES_XCBC;
/** Set session cipher parameters */
static int
-aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
+ if (xform == NULL) {
+ sess->cipher.mode = NULL_CIPHER;
+ return 0;
+ }
+
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
case RTE_CRYPTO_CIPHER_AES_CBC:
sess->cipher.mode = CBC;
break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.mode = CNTR;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_SEC_BPI;
+ break;
default:
MB_LOG_ERR("Unsupported cipher mode parameter");
return -1;
/** Parse crypto xform chain and set private session parameters */
int
-aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
- case HASH_CIPHER:
+ case AESNI_MB_OP_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
break;
- case CIPHER_HASH:
+ case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
break;
+ case AESNI_MB_OP_HASH_ONLY:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = NULL;
+ break;
+ case AESNI_MB_OP_CIPHER_ONLY:
+ /*
+ * The multi-buffer library only supports two chaining modes,
+ * CIPHER_HASH and HASH_CIPHER. For cipher-only operations, the
+ * chain order depends on the cipher direction: encryption must run
+ * as the first stage and decryption as the last.
+ */
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->chain_order = CIPHER_HASH;
+ else
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = NULL;
+ cipher_xform = xform;
+ break;
+ case AESNI_MB_OP_NOT_SUPPORTED:
default:
MB_LOG_ERR("Unsupported operation chain order parameter");
return -1;
return 0;
}
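/*
 * Editorial note (not part of this patch): for example, an AES-CBC
 * decrypt-only session takes the AESNI_MB_OP_CIPHER_ONLY branch above
 * and ends up with sess->chain_order == HASH_CIPHER plus a NULL_HASH
 * auth stage, so decryption runs as the final stage of the job.
 */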
+/**
+ * Burst enqueue: place crypto operations on the ingress queue for processing.
+ *
+ * @param __qp Queue Pair to process
+ * @param ops Crypto operations for processing
+ * @param nb_ops Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = __qp;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+ (void **)ops, nb_ops, NULL);
+
+ qp->stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
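/*
 * Editorial sketch (not part of this patch): enqueue only buffers ops on
 * qp->ingress_queue; the actual crypto work happens on the dequeue path.
 * An application drives it through the generic cryptodev burst API; the
 * function and parameter names below are placeholders.
 */
static void
example_enqueue_all(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t sent = 0;

	/* retry until the ingress ring has accepted the whole burst */
	while (sent < nb_ops)
		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
				ops + sent, nb_ops - sent);
}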
/** Get multi buffer session */
-static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+static inline struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
- RTE_CRYPTODEV_AESNI_MB_PMD))
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_AESNI_MB_PMD)) {
return NULL;
+ }
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
sess = (struct aesni_mb_session *)
((struct rte_cryptodev_sym_session *)_sess)->_private;
- if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
}
return sess;
* - Completed JOB_AES_HMAC structure pointer on success
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
-static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
- struct aesni_mb_session *session)
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+ struct rte_crypto_op *op)
{
- JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ struct aesni_mb_session *session;
+ uint16_t m_offset = 0;
- job = (*qp->ops->job.get_next)(&qp->mb_mgr);
- if (unlikely(job == NULL))
- return job;
+ session = get_session(qp, op);
+ if (session == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -1;
+ }
+ op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
/* Set crypto operation */
job->chain_order = session->chain_order;
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL) {
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
- if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ if (job->hash_alg != NULL_HASH &&
+ session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
if (job->auth_tag_output == NULL) {
MB_LOG_ERR("failed to allocate space in output mbuf "
"for temp digest");
- return NULL;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
}
memset(job->auth_tag_output, 0,
get_digest_byte_length(job->hash_alg));
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
- return job;
+ return 0;
+}
+
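/*
 * Editorial sketch (not part of this patch): the minimal in-place
 * operation layout consumed by set_mb_job_params() above, using the
 * field layout this file relies on (cipher.iv carried inside the
 * symmetric op). All names below are placeholders.
 */
static void
example_fill_sym_op(struct rte_crypto_op *op, struct rte_mbuf *m,
		uint8_t *iv, uint8_t *digest, uint32_t data_len)
{
	op->sym->m_src = m;
	op->sym->m_dst = NULL;			/* in-place operation */

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = data_len;
	op->sym->cipher.iv.data = iv;
	op->sym->cipher.iv.length = 16;		/* AES block-size IV */

	op->sym->auth.data.offset = 0;
	op->sym->auth.data.length = data_len;
	op->sym->auth.digest.data = digest;
}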
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op)
+{
+ struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
+
+ /* Verify digest if required */
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* trim area used for digest from mbuf */
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/**
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
-
- if (job->user_data == NULL)
- return NULL;
-
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
- /* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* check if job has been processed */
- if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
- } else if (job->chain_order == HASH_CIPHER) {
- /* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+
+ struct aesni_mb_session *sess;
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+ switch (job->status) {
+ case STS_COMPLETED:
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (job->hash_alg != NULL_HASH) {
+ sess = (struct aesni_mb_session *)
+ op->sym->session->_private;
+
+ if (sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ verify_digest(job, op);
+ }
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
* - Number of processed jobs
*/
static unsigned
-handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
- while (job) {
- processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
- else
+ while (job != NULL && processed_jobs < nb_ops) {
+ op = post_process_mb_job(qp, job);
+
+ if (op) {
+ ops[processed_jobs++] = op;
+ qp->stats.dequeued_count++;
+ } else {
qp->stats.dequeue_err_count++;
+ break;
+ }
- job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
}
return processed_jobs;
}
-static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
+ int processed_ops = 0;
- struct aesni_mb_session *sess;
+ /* Flush the remaining jobs */
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+
+ if (job)
+ processed_ops += handle_completed_jobs(qp, job,
+ &ops[processed_ops], nb_ops - processed_ops);
+
+ return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job)
+{
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = NULL_HASH;
+ job->cipher_direction = DECRYPT;
+
+ return job;
+}
+
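/*
 * Editorial note (not part of this patch): when set_mb_job_params()
 * fails, the job is converted to a NULL-cipher/NULL-hash job and still
 * submitted (see the dequeue loop below), so the multi-buffer manager's
 * in-flight pipeline stays aligned and jobs complete in submission
 * order.
 */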
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
struct aesni_mb_qp *qp = queue_pair;
- JOB_AES_HMAC *job = NULL;
+ struct rte_crypto_op *op;
+ JOB_AES_HMAC *job;
+
+ int retval, processed_jobs = 0;
- int i, processed_jobs = 0;
+ do {
+ /* Get next operation to process from ingress queue */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
- }
+ /* Get next free mb job struct from mb manager */
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ if (unlikely(job == NULL)) {
+ /* if there are no free mb job structs, flush the mb_mgr */
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ (nb_ops - processed_jobs) - 1);
- sess = get_session(qp, &ol->op.crypto);
- if (unlikely(sess == NULL)) {
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
- if (unlikely(job == NULL)) {
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
+ retval = set_mb_job_params(job, qp, op);
+ if (unlikely(retval != 0)) {
+ qp->stats.dequeue_err_count++;
+ set_job_null_op(job);
}
- /* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ /* Submit job to multi-buffer for processing */
+ job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
- }
+ processed_jobs += handle_completed_jobs(qp, job,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
- if (processed_jobs == 0)
- goto flush_jobs;
- else
- qp->stats.enqueued_count += processed_jobs;
- return i;
+ } while (processed_jobs < nb_ops);
-flush_jobs:
- /*
- * If we haven't processed any jobs in submit loop, then flush jobs
- * queue to stop the output stalling
- */
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
- if (job)
- qp->stats.enqueued_count += handle_completed_jobs(qp, job);
-
- return i;
-}
-
-static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
-{
- struct aesni_mb_qp *qp = queue_pair;
-
- unsigned nb_dequeued;
+ if (processed_jobs < 1)
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->stats.dequeued_count += nb_dequeued;
-
- return nb_dequeued;
+ return processed_jobs;
}
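/*
 * Editorial sketch (not part of this patch): because jobs are only
 * processed on the dequeue path, an application must poll dequeue to
 * make progress. The names below are placeholders.
 */
static void
example_poll(uint8_t dev_id, uint16_t qp_id)
{
	struct rte_crypto_op *deq[32];
	uint16_t i, nb_deq;

	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq, 32);
	for (i = 0; i < nb_deq; i++) {
		/* RTE_CRYPTO_OP_STATUS_AUTH_FAILED: digest mismatch */
		if (deq[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
			MB_LOG_ERR("op failed with status %d",
					deq[i]->status);
		rte_crypto_op_free(deq[i]);
	}
}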
-
-static int cryptodev_aesni_mb_uninit(const char *name);
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
static int
-cryptodev_aesni_mb_create(const char *name, unsigned socket_id)
+cryptodev_aesni_mb_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct aesni_mb_private *internals;
enum aesni_mb_vector_mode vector_mode;
- /* Check CPU for support for AES instruction set */
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- MB_LOG_ERR("AES instructions not supported by CPU");
- return -EFAULT;
- }
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
/* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ vector_mode = RTE_AESNI_MB_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
vector_mode = RTE_AESNI_MB_AVX2;
else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
vector_mode = RTE_AESNI_MB_AVX;
return -EFAULT;
}
- /* create a unique device name */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- MB_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
- sizeof(struct aesni_mb_private), socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct aesni_mb_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
MB_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ switch (vector_mode) {
+ case RTE_AESNI_MB_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_MB_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_MB_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ case RTE_AESNI_MB_AVX512:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ break;
+ default:
+ break;
+ }
+
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
internals->vector_mode = vector_mode;
- internals->max_nb_queue_pairs = RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS;
- internals->max_nb_sessions = RTE_AESNI_MB_PMD_MAX_NB_SESSIONS;
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- MB_LOG_ERR("driver %s: cryptodev_aesni_create failed", name);
+ MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
+ init_params->name);
- cryptodev_aesni_mb_uninit(crypto_dev_name);
+ cryptodev_aesni_mb_remove(vdev);
return -EFAULT;
}
-
static int
-cryptodev_aesni_mb_init(const char *name,
- const char *params __rte_unused)
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
- RTE_LOG(INFO, PMD, "Initialising %s\n", name);
-
- return cryptodev_aesni_mb_create(name, rte_socket_id());
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ ""
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
static int
-cryptodev_aesni_mb_uninit(const char *name)
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
return 0;
}
-static struct rte_driver cryptodev_aesni_mb_pmd_drv = {
- .name = CRYPTODEV_NAME_AESNI_MB_PMD,
- .type = PMD_VDEV,
- .init = cryptodev_aesni_mb_init,
- .uninit = cryptodev_aesni_mb_uninit
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+ .probe = cryptodev_aesni_mb_probe,
+ .remove = cryptodev_aesni_mb_remove
};
-PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");