/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <des.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_aesni_mb_pmd_private.h"

static uint8_t cryptodev_driver_id;

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
		void *dec_exp_keys);
/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned int i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}
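/*
 * Background: for HMAC the tag is computed as
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 * Pre-hashing the single ipad/opad blocks once per session means each
 * per-packet hash starts from these saved partial digests instead of
 * re-hashing the padded key for every operation.
 */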
/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;
	}

	return AESNI_MB_OP_NOT_SUPPORTED;
}
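/*
 * Example: a two-element chain of cipher -> auth xforms maps to
 * AESNI_MB_OP_CIPHER_HASH and auth -> cipher to AESNI_MB_OP_HASH_CIPHER,
 * while a single unchained xform maps to the matching *_ONLY operation.
 * Only the first two xforms of a chain are inspected here.
 */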
/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn;

	if (xform == NULL) {
		sess->auth.algo = NULL_HASH;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		MB_LOG_ERR("Crypto xform struct not of type auth");
		return -1;
	}

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;
		(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_ops->aux.one_block.md5;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_ops->aux.one_block.sha1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_ops->aux.one_block.sha224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_ops->aux.one_block.sha256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_ops->aux.one_block.sha384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_ops->aux.one_block.sha512;
		break;
	default:
		MB_LOG_ERR("Unsupported authentication algorithm selection");
		return -1;
	}

	/* Calculate Authentication precomputes */
	calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));

	return 0;
}
/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	uint8_t is_aes = 0;
	aes_keyexp_t aes_keyexp_fn;

	if (xform == NULL) {
		sess->cipher.mode = NULL_CIPHER;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		MB_LOG_ERR("Crypto xform struct not of type cipher");
		return -1;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		MB_LOG_ERR("Invalid cipher operation parameter");
		return -1;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_DES;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher mode parameter");
		return -1;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	if (is_aes) {
		/* Check key length and choose key expansion function for AES */
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
			break;
		default:
			MB_LOG_ERR("Invalid cipher key length");
			return -1;
		}

		/* Expanded cipher keys */
		(*aes_keyexp_fn)(xform->cipher.key.data,
				sess->cipher.expanded_aes_keys.encode,
				sess->cipher.expanded_aes_keys.decode);
	} else {
		if (xform->cipher.key.length != 8) {
			MB_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		sess->cipher.key_length_in_bytes = 8;

		des_key_schedule(
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
			xform->cipher.key.data);
		des_key_schedule(
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
			xform->cipher.key.data);
	}

	return 0;
}
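/*
 * Note on the DES path above: the 8-byte DES key is expanded with
 * des_key_schedule() into the same expanded_aes_keys storage the AES
 * modes use, and the identical schedule is installed for both the encode
 * and decode slots; DES decryption consumes the round keys in reverse
 * order, so no separate decrypt schedule is required.
 */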
/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	int ret;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = NULL;
		break;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * The multi-buffer library operates in only two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing cipher only,
		 * the chain order depends on the cipher operation:
		 * encryption is always the first operation and decryption
		 * the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;
		auth_xform = NULL;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_NOT_SUPPORTED:
	default:
		MB_LOG_ERR("Unsupported operation chain order parameter");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
	if (ret != 0) {
		MB_LOG_ERR("Invalid/unsupported authentication parameters");
		return ret;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
			cipher_xform);
	if (ret != 0) {
		MB_LOG_ERR("Invalid/unsupported cipher parameters");
		return ret;
	}

	return 0;
}
/**
 * Burst enqueue: place crypto operations on the ingress queue for processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;
	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;
	return nb_enqueued;
}
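/*
 * Note: enqueue only stages operations on the software ring; the actual
 * submission to the multi-buffer manager (and all cipher/hash work)
 * happens on the dequeue path below.
 */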
/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
	} else {
		void *_sess = NULL;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
			return NULL;

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp, _sess_private_data);
			sess = NULL;
		}

		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_session_private_data(op->sym->session, cryptodev_driver_id,
				_sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}
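/*
 * Session-less operations draw two objects from the queue pair's session
 * mempool: one for the generic session header and one for this driver's
 * private session data. Both are returned to the pool again in
 * post_process_mb_job() once the operation completes.
 */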
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi-buffer library for processing.
 *
 * @param job		JOB_AES_HMAC structure to fill
 * @param qp		queue pair
 * @param op		crypto operation to process
 * @param digest_idx	index into the queue pair's temporary digest array
 *
 * @return
 * - 0 on success, with the job structure completed
 * - -1 if completion of the JOB_AES_HMAC structure isn't possible
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint16_t m_offset = 0;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	if (job->hash_alg == AES_XCBC) {
		job->_k1_expanded = session->auth.xcbc.k1_expanded;
		job->_k2 = session->auth.xcbc.k2;
		job->_k3 = session->auth.xcbc.k3;
	} else {
		job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->hashed_auth_key_xor_opad = session->auth.pads.outer;
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			MB_LOG_ERR("failed to allocate space in destination "
					"mbuf for source data");
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void *),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
		m_dst = m_src;
		m_offset = op->sym->cipher.data.offset;
	}

	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
	} else {
		job->auth_tag_output = op->sym->auth.digest.data;
	}

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */
	job->auth_tag_output_len_in_bytes =
			get_truncated_digest_byte_length(job->hash_alg);

	/* Set IV parameters */
	job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	job->iv_len_in_bytes = session->iv.length;

	/* Data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return 0;
}
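/*
 * For verify operations the computed tag is written to a per-queue-pair
 * scratch buffer (qp->temp_digests, indexed modulo MAX_JOBS) rather than
 * to the mbuf, so the digest supplied by the application survives intact
 * for the memcmp() in verify_digest() below.
 */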
static inline void
verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
		struct rte_crypto_op *op)
{
	/* Verify digest if required */
	if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
			job->auth_tag_output_len_in_bytes) != 0)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/**
 * Process a completed job and return the rte_crypto_op which the job
 * processed
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - Returns processed crypto operation.
 * - Returns NULL on invalid job
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = get_session_private_data(
					op->sym->session,
					cryptodev_driver_id);

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
		case STS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg != NULL_HASH) {
				if (sess->auth.operation ==
						RTE_CRYPTO_AUTH_OP_VERIFY)
					verify_digest(qp, job, op);
			}
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
				rte_cryptodev_get_header_session_size());
		rte_mempool_put(qp->sess_mp, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}
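/*
 * Status lifecycle: operations arrive as NOT_PROCESSED, move to SUCCESS or
 * AUTH_FAILED here once the multi-buffer job completes, and to ERROR if
 * the library reports any status other than STS_COMPLETED. An op whose
 * status was already set on the enqueue/params path is left untouched.
 */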
/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op = NULL;
	unsigned int processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

		if (op) {
			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
		} else {
			qp->stats.dequeue_err_count++;
			break;
		}
		if (processed_jobs == nb_ops)
			break;

		job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
	}

	return processed_jobs;
}
static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);

	if (job)
		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;
}
static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return job;
}
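/*
 * When filling a job fails, the job is still submitted as a NULL
 * cipher/NULL hash no-op: the multi-buffer manager expects every job
 * obtained via get_next to be submitted, and the no-op keeps the failed
 * rte_crypto_op attached so it is eventually returned to the caller.
 */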
static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	uint8_t digest_idx = qp->digest_idx;
	do {
		/* Get next operation to process from ingress queue */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

		/* Get next free mb job struct from mb manager */
		job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					(nb_ops - processed_jobs) - 1);

			job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		}

		retval = set_mb_job_params(job, qp, op, &digest_idx);
		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);
		}

		/* Submit job to multi-buffer for processing */
		job = (*qp->op_fns->job.submit)(&qp->mb_mgr);

		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;

	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}
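/*
 * The trailing flush matters for small bursts: the multi-buffer manager
 * buffers jobs internally until enough accumulate to fill its SIMD lanes,
 * so without flush_mb_mgr() a dequeue that submitted only a few jobs
 * could return nothing and leave completed work stranded.
 */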
static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

static int
cryptodev_aesni_mb_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		MB_LOG_ERR("AES instructions not supported by CPU");
		return -EFAULT;
	}

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		MB_LOG_ERR("failed to create cryptodev vdev");
		return -ENODEV;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI;

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		break;
	case RTE_AESNI_MB_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		break;
	case RTE_AESNI_MB_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		break;
	case RTE_AESNI_MB_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		break;
	default:
		break;
	}

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;
}
static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct aesni_mb_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
	};
	const char *name, *args;
	int retval;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	args = rte_vdev_device_args(vdev);

	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
	if (retval) {
		MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n",
				args);
		return -EINVAL;
	}

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
		cryptodev_aesni_mb_pmd_drv,
		cryptodev_driver_id);
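/*
 * Usage sketch (assuming CRYPTODEV_NAME_AESNI_MB_PMD resolves to the
 * conventional vdev name "crypto_aesni_mb"): the device can be
 * instantiated from the EAL command line with the parameters registered
 * above, e.g.
 *
 *   ./app -l 0-1 --vdev "crypto_aesni_mb,max_nb_queue_pairs=2,socket_id=0"
 *
 * where "./app" and the core list are placeholders for the actual
 * application.
 */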