/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_aesni_mb_pmd_private.h"

static uint8_t cryptodev_driver_id;

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
		void *dec_exp_keys);
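
/*
 * Note: the one-block hash and AES key-expansion routines are reached
 * through the aesni_mb_op_fns table (see rte_aesni_mb_pmd_private.h), so
 * the same session-setup code drives whichever SSE/AVX/AVX2/AVX512 build
 * of the multi-buffer library was selected at device creation time.
 */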

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}
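
/*
 * The precompute above is the standard HMAC construction (RFC 2104):
 * ipad = H(key ^ 0x36..36) and opad = H(key ^ 0x5C..5C), each hashed as
 * one full block. Illustrative call, assuming a hypothetical SHA-1
 * one-block helper and block size constant:
 *
 *	uint8_t ipad[SHA1_DIGEST_SIZE], opad[SHA1_DIGEST_SIZE];
 *	calculate_auth_precomputes(sha1_one_block, ipad, opad,
 *			key, key_len, SHA1_BLOCK_SIZE);
 *
 * The partial hashes let every subsequent HMAC for the session resume
 * from the inner/outer states instead of rehashing the padded key.
 */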

/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;
	}

	return AESNI_MB_OP_NOT_SUPPORTED;
}
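
/*
 * For example, an xform list of RTE_CRYPTO_SYM_XFORM_CIPHER with
 * xform->next of RTE_CRYPTO_SYM_XFORM_AUTH maps to
 * AESNI_MB_OP_CIPHER_HASH (encrypt then authenticate); the reverse
 * ordering maps to AESNI_MB_OP_HASH_CIPHER.
 */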

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn;

	if (xform == NULL) {
		sess->auth.algo = NULL_HASH;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		MB_LOG_ERR("Crypto xform struct not of type auth");
		return -EINVAL;
	}

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;
		(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_ops->aux.one_block.md5;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_ops->aux.one_block.sha1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_ops->aux.one_block.sha224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_ops->aux.one_block.sha256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_ops->aux.one_block.sha384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_ops->aux.one_block.sha512;
		break;
	default:
		MB_LOG_ERR("Unsupported authentication algorithm selection");
		return -EINVAL;
	}

	/* Calculate Authentication precomputes */
	calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));

	return 0;
}
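
/*
 * The inner/outer pads computed here are consumed later in
 * set_mb_job_params() as job->hashed_auth_key_xor_ipad/_opad, so session
 * setup is the only place the raw authentication key is touched.
 */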

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	aes_keyexp_t aes_keyexp_fn;

	if (xform == NULL) {
		sess->cipher.mode = NULL_CIPHER;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		MB_LOG_ERR("Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher mode parameter");
		return -EINVAL;
	}

	/* Check key length and choose key expansion function */
	switch (xform->cipher.key.length) {
	case AES_128_BYTES:
		sess->cipher.key_length_in_bytes = AES_128_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
		break;
	case AES_192_BYTES:
		sess->cipher.key_length_in_bytes = AES_192_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
		break;
	case AES_256_BYTES:
		sess->cipher.key_length_in_bytes = AES_256_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher key length");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Expanded cipher keys */
	(*aes_keyexp_fn)(xform->cipher.key.data,
			sess->cipher.expanded_aes_keys.encode,
			sess->cipher.expanded_aes_keys.decode);

	return 0;
}
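
/*
 * Illustrative only: a minimal AES-128-CBC transform an application
 * might supply for session creation (the key buffer and IV_OFFSET here
 * are assumptions for the example, not mandated by the PMD):
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */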

/** Parse crypto xform chain and set private session parameters */
static int
aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = NULL;
		break;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * Multi buffer library operates only in two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
		 * chain order depends on cipher operation: encryption is
		 * always the first operation and decryption the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;
		auth_xform = NULL;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_NOT_SUPPORTED:
	default:
		MB_LOG_ERR("Unsupported operation chain order parameter");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;

	if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
		MB_LOG_ERR("Invalid/unsupported authentication parameters");
		return -EINVAL;
	}

	if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
			cipher_xform)) {
		MB_LOG_ERR("Invalid/unsupported cipher parameters");
		return -EINVAL;
	}

	return 0;
}

/**
 * Burst enqueue, place crypto operations on ingress queue for processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}
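
/*
 * Applications reach this handler through rte_cryptodev_enqueue_burst(),
 * which dispatches to the dev->enqueue_burst hook registered in
 * cryptodev_aesni_mb_create() below. Enqueue only stages ops on the
 * ingress ring; the crypto work itself is driven from the dequeue side.
 */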

/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
	} else {
		void *_sess = NULL;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
			return NULL;

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_session_private_data(op->sym->session, cryptodev_driver_id,
				_sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}
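
/*
 * Note: a session-less op draws two objects from the queue pair's session
 * mempool, one for the rte_cryptodev_sym_session header and one for the
 * PMD-private session data; post_process_mb_job() returns both to the
 * pool when the op completes.
 */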

/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	job	JOB_AES_HMAC structure to fill
 * @param	qp	queue pair
 * @param	op	crypto operation to process
 *
 * @return
 * - 0 on success, job filled and op status set to enqueued
 * - -1 on failure, op status set to the relevant error
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint16_t m_offset = 0;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}
	op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	if (job->hash_alg == AES_XCBC) {
		job->_k1_expanded = session->auth.xcbc.k1_expanded;
		job->_k2 = session->auth.xcbc.k2;
		job->_k3 = session->auth.xcbc.k3;
	} else {
		job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->hashed_auth_key_xor_opad = session->auth.pads.outer;
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			MB_LOG_ERR("failed to allocate space in destination "
					"mbuf for source data");
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
		m_dst = m_src;
		m_offset = op->sym->cipher.data.offset;
	}

	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
				get_digest_byte_length(job->hash_alg));

		if (job->auth_tag_output == NULL) {
			MB_LOG_ERR("failed to allocate space in output mbuf "
					"for temp digest");
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}

		memset(job->auth_tag_output, 0,
				get_digest_byte_length(job->hash_alg));
	} else {
		job->auth_tag_output = op->sym->auth.digest.data;
	}

	/*
	 * Multi-buffer library currently only supports returning a truncated
	 * digest length as specified in the relevant IPsec RFCs
	 */
	job->auth_tag_output_len_in_bytes =
			get_truncated_digest_byte_length(job->hash_alg);

	/* Set IV parameters */
	job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;
	job->user_data2 = m_dst;

	return 0;
}
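
/*
 * At this point the job carries everything the multi-buffer library needs:
 * expanded keys, precomputed HMAC pads, src/dst pointers and offsets, plus
 * the originating op in user_data so the completion path can recover it.
 */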

static inline void
verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
	struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;

	/* Verify digest if required */
	if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
			job->auth_tag_output_len_in_bytes) != 0)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	/* trim area used for digest from mbuf */
	rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}

/**
 * Process a completed job and return the crypto operation it processed
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - Returns the processed crypto operation; for digest verification the
 *   mbuf is trimmed of the temporary digest appended during job setup.
 * - Returns NULL on invalid job
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = get_session_private_data(
			op->sym->session,
			cryptodev_driver_id);

	if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
		switch (job->status) {
		case STS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg != NULL_HASH) {
				if (sess->auth.operation ==
						RTE_CRYPTO_AUTH_OP_VERIFY)
					verify_digest(job, op);
			}
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
				rte_cryptodev_get_header_session_size());
		rte_mempool_put(qp->sess_mp, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp		Queue Pair to process
 * @param job		JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op = NULL;
	unsigned processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

		if (op) {
			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
		} else {
			qp->stats.dequeue_err_count++;
			break;
		}
		if (processed_jobs == nb_ops)
			break;

		job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
	}

	return processed_jobs;
}

static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);

	if (job)
		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;
}

static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job)
{
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	return job;
}
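
/*
 * Submitting a null op (NULL cipher and hash) keeps the multi-buffer
 * manager's internal pipeline advancing when an op's parameters are
 * rejected: the job slot obtained from get_next must still be submitted,
 * but completes without touching any data.
 */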

static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	do {
		/* Get next operation to process from ingress queue */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

		/* Get next free mb job struct from mb manager */
		job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					(nb_ops - processed_jobs) - 1);

			job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		}

		retval = set_mb_job_params(job, qp, op);
		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job);
		}

		/* Submit job to multi-buffer for processing */
		job = (*qp->op_fns->job.submit)(&qp->mb_mgr);

		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}
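
/*
 * Illustrative poll-mode usage from an application; dev_id, qp_id, the
 * burst size and handle_error() are assumptions for the example:
 *
 *	struct rte_crypto_op *deq[32];
 *	uint16_t i, n = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq, 32);
 *	for (i = 0; i < n; i++)
 *		if (deq[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *			handle_error(deq[i]);
 *
 * Because jobs complete lazily, an op enqueued in one call may only be
 * returned after later bursts have filled the multi-buffer pipeline.
 */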

static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

static int
cryptodev_aesni_mb_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;

	if (init_params->name[0] == '\0')
		snprintf(init_params->name, sizeof(init_params->name),
				"%s", name);

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;

	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
			sizeof(struct aesni_mb_private), init_params->socket_id,
			vdev);
	if (dev == NULL) {
		MB_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI;

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		break;
	case RTE_AESNI_MB_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		break;
	case RTE_AESNI_MB_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		break;
	case RTE_AESNI_MB_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		break;
	default:
		break;
	}
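
	/*
	 * The per-ISA feature flag set above is reported through
	 * rte_cryptodev_info_get(), letting applications discover which
	 * multi-buffer build (SSE/AVX/AVX2/AVX512) this device selected.
	 */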

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;
init_error:
	MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
			init_params->name);

	cryptodev_aesni_mb_remove(vdev);
	return -EFAULT;
}

static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id(),
		""
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	if (init_params.name[0] != '\0')
		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
			init_params.name);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
}

static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_aesni_mb_pmd_drv, cryptodev_driver_id);
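
/*
 * Illustrative EAL invocation (argument values are an example only):
 *
 *	--vdev "crypto_aesni_mb,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0"
 *
 * CRYPTODEV_NAME_AESNI_MB_PMD expands to the "crypto_aesni_mb" device name.
 */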