/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_aesni_mb_pmd_private.h"
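
/*
 * Function-pointer types matching the multi-buffer library's auxiliary
 * helpers: a one-block hash used to build the HMAC ipad/opad pre-computes,
 * and an AES key-expansion routine that fills both the encrypt and decrypt
 * key schedules. Concrete implementations are resolved per vector mode
 * through the queue pair's mb_ops table.
 */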
typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)
		(const void *key, void *enc_exp_keys, void *dec_exp_keys);
/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;
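	/*
	 * Note: keys longer than the block size are truncated here rather
	 * than hashed down to block size as RFC 2104 prescribes; the PMD's
	 * advertised capabilities are expected to bound key lengths
	 * accordingly.
	 */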
	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}
/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;
	}

	return AESNI_MB_OP_NOT_SUPPORTED;
}
/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn;

	if (xform == NULL) {
		sess->auth.algo = NULL_HASH;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		MB_LOG_ERR("Crypto xform struct not of type auth");
		return -1;
	}

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;
		(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}
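
	/*
	 * All remaining algorithms are HMAC variants: pick the matching
	 * one-block hash so the ipad/opad pre-computes below can be built
	 * once at session setup instead of per operation.
	 */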
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_ops->aux.one_block.md5;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_ops->aux.one_block.sha1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_ops->aux.one_block.sha224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_ops->aux.one_block.sha256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_ops->aux.one_block.sha384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_ops->aux.one_block.sha512;
		break;
	default:
		MB_LOG_ERR("Unsupported authentication algorithm selection");
		return -ENOTSUP;
	}

	/* Calculate Authentication precomputes */
	calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));

	return 0;
}
/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	aes_keyexp_t aes_keyexp_fn;

	if (xform == NULL) {
		sess->cipher.mode = NULL_CIPHER;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		MB_LOG_ERR("Crypto xform struct not of type cipher");
		return -1;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher operation parameter");
		return -1;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher mode parameter");
		return -1;
	}

	/* Check key length and choose key expansion function */
	switch (xform->cipher.key.length) {
	case AES_128_BYTES:
		sess->cipher.key_length_in_bytes = AES_128_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
		break;
	case AES_192_BYTES:
		sess->cipher.key_length_in_bytes = AES_192_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
		break;
	case AES_256_BYTES:
		sess->cipher.key_length_in_bytes = AES_256_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher key length");
		return -1;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;
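
	/*
	 * Only the IV's offset into the rte_crypto_op and its length are
	 * cached here; the IV bytes themselves are read from each op at job
	 * setup time via rte_crypto_op_ctod_offset().
	 */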
	/* Expanded cipher keys */
	(*aes_keyexp_fn)(xform->cipher.key.data,
			sess->cipher.expanded_aes_keys.encode,
			sess->cipher.expanded_aes_keys.decode);

	return 0;
}
/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = NULL;
		break;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * Multi buffer library operates only at two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
		 * chain order depends on cipher operation: encryption is
		 * always the first operation and decryption the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;
		auth_xform = NULL;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_NOT_SUPPORTED:
	default:
		MB_LOG_ERR("Unsupported operation chain order parameter");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;

	if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
		MB_LOG_ERR("Invalid/unsupported authentication parameters");
		return -1;
	}

	if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
			cipher_xform)) {
		MB_LOG_ERR("Invalid/unsupported cipher parameters");
		return -1;
	}

	return 0;
}
/**
 * Burst enqueue: place crypto operations on the ingress queue for processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;
	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);
	qp->stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}
/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(op->sym->session->dev_type !=
				RTE_CRYPTODEV_AESNI_MB_PMD)) {
			return NULL;
		}

		sess = (struct aesni_mb_session *)op->sym->session->_private;
	} else {
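		/*
		 * Session-less operation: take a temporary session object
		 * from the qp's mempool and derive its parameters from the
		 * op's xform chain.
		 */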
		void *_sess = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;
		sess = (struct aesni_mb_session *)
			((struct rte_cryptodev_sym_session *)_sess)->_private;
		if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
	}

	return sess;
}
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	job	JOB_AES_HMAC structure to fill
 * @param	qp	queue pair
 * @param	op	crypto operation to process
 *
 * @return
 * - 0 on success, with the job structure completed
 * - -1 on failure, with the op status set accordingly
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint16_t m_offset = 0;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}
	op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
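
	/*
	 * ENQUEUED marks the op as in flight; post_process_mb_job() only
	 * inspects job->status for ops still in this state, so any error
	 * status set below takes precedence once the job completes.
	 */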
	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	if (job->hash_alg == AES_XCBC) {
		job->_k1_expanded = session->auth.xcbc.k1_expanded;
		job->_k2 = session->auth.xcbc.k2;
		job->_k3 = session->auth.xcbc.k3;
	} else {
		job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->hashed_auth_key_xor_opad = session->auth.pads.outer;
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		/* out-of-place operation: output goes to m_dst */
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			MB_LOG_ERR("failed to allocate space in destination "
					"mbuf for source data");
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
		m_dst = m_src;
		m_offset = op->sym->cipher.data.offset;
	}
	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		/* append a scratch digest; verify_digest() trims it again */
		job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
				get_digest_byte_length(job->hash_alg));
		if (job->auth_tag_output == NULL) {
			MB_LOG_ERR("failed to allocate space in output mbuf "
					"for temp digest");
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}

		memset(job->auth_tag_output, 0,
				get_digest_byte_length(job->hash_alg));
	} else {
		job->auth_tag_output = op->sym->auth.digest.data;
	}

	/*
	 * Multi-buffer library currently only supports returning a truncated
	 * digest length as specified in the relevant IPsec RFCs.
	 */
	job->auth_tag_output_len_in_bytes =
			get_truncated_digest_byte_length(job->hash_alg);

	/* Set IV parameters */
	job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	job->iv_len_in_bytes = session->iv.length;

	/* Data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;
	job->user_data2 = m_dst;

	return 0;
}
static inline void
verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
	struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;

	/* Verify digest if required */
	if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
			job->auth_tag_output_len_in_bytes) != 0)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	/* trim area used for digest from mbuf */
	rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/**
 * Process a completed job and return the crypto operation it carried.
 *
 * @param qp	Queue Pair the job was submitted on
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - The processed crypto operation. For a verify operation the mbuf is
 *   trimmed of the scratch digest that was appended for comparison against
 *   the supplied digest.
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess;

	if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
		switch (job->status) {
		case STS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg != NULL_HASH) {
				sess = (struct aesni_mb_session *)
						op->sym->session->_private;

				if (sess->auth.operation ==
						RTE_CRYPTO_AUTH_OP_VERIFY)
					verify_digest(job, op);
			}
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}
/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL.
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op = NULL;
	unsigned processed_jobs = 0;

	while (job != NULL && processed_jobs < nb_ops) {
		op = post_process_mb_job(qp, job);

		if (op) {
			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
		} else {
			qp->stats.dequeue_err_count++;
			break;
		}

		job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
	}

	return processed_jobs;
}
static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);

	if (job)
		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;
}
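
/*
 * Replace the contents of a job slot with a do-nothing operation. Used when
 * set_mb_job_params() fails: the slot has already been taken from the
 * manager, so it is neutralised and submitted anyway to keep the internal
 * pipeline in step.
 */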
static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job)
{
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	return job;
}
static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	int retval, processed_jobs = 0;

	do {
		/* Get next operation to process from ingress queue */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

		/* Get next free mb job struct from mb manager */
		job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					(nb_ops - processed_jobs) - 1);
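
			/*
			 * Flushing one fewer than the remaining capacity
			 * leaves room in ops[] for the op just dequeued once
			 * its own job completes.
			 */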
			job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		}

		retval = set_mb_job_params(job, qp, op);
		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job);
		}

		/* Submit job to multi-buffer for processing */
		job = (*qp->op_fns->job.submit)(&qp->mb_mgr);

		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}
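
/*
 * Usage sketch (illustrative, not part of this driver): an application
 * drives the handlers above through the generic cryptodev burst API.
 * Identifiers such as dev_id, qp_id, BURST_SIZE and the ops[] array are
 * application-side assumptions.
 *
 *	struct rte_crypto_op *ops[BURST_SIZE];
 *	uint16_t nb_enq, nb_deq;
 *
 *	// place prepared ops on the queue pair's ingress ring
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, BURST_SIZE);
 *
 *	// drive the multi-buffer manager and collect completed ops
 *	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_enq);
 */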
static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

static int
cryptodev_aesni_mb_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;

	if (init_params->name[0] == '\0')
		snprintf(init_params->name, sizeof(init_params->name),
				"%s", name);

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;
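
	/*
	 * The vector mode recorded here selects which multi-buffer ops
	 * table a queue pair binds to at setup time, so the widest ISA the
	 * CPU reports is probed first.
	 */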
	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
			sizeof(struct aesni_mb_private), init_params->socket_id,
			vdev);
	if (dev == NULL) {
		MB_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI;
	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		break;
	case RTE_AESNI_MB_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		break;
	case RTE_AESNI_MB_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		break;
	case RTE_AESNI_MB_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		break;
	default:
		break;
	}

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;

init_error:
	MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
			init_params->name);

	cryptodev_aesni_mb_remove(vdev);
	return -EFAULT;
}
static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id(),
		""
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	if (init_params.name[0] != '\0')
		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
				init_params.name);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}
static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");