/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>

#include "aesni_gcm_pmd_private.h"

static uint8_t cryptodev_driver_id;

/* setup session handlers */
static void
set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
{
	s->ops.pre = gcm_ops->pre;
	s->ops.init = gcm_ops->init;

	switch (s->op) {
	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
		s->ops.cipher = gcm_ops->enc;
		s->ops.update = gcm_ops->update_enc;
		s->ops.finalize = gcm_ops->finalize_enc;
		break;
	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
		s->ops.cipher = gcm_ops->dec;
		s->ops.update = gcm_ops->update_dec;
		s->ops.finalize = gcm_ops->finalize_dec;
		break;
	case AESNI_GMAC_OP_GENERATE:
	case AESNI_GMAC_OP_VERIFY:
		s->ops.finalize = gcm_ops->finalize_enc;
		break;
	}
}
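
/*
 * Note: GMAC generate and verify share the encrypt-side finalize handler;
 * verification does not happen here but later, by comparing the generated
 * tag against the one supplied with the operation (see
 * post_process_gcm_crypto_op()).
 */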

/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
		struct aesni_gcm_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform;
	const struct rte_crypto_sym_xform *aead_xform;
	uint8_t key_length;
	const uint8_t *key;

	/* AES-GMAC */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = xform;
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
			AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
					"authentication only algorithm");
			return -ENOTSUP;
		}
		/* Set IV parameters */
		sess->iv.offset = auth_xform->auth.iv.offset;
		sess->iv.length = auth_xform->auth.iv.length;

		/* Select Crypto operation */
		if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
			sess->op = AESNI_GMAC_OP_GENERATE;
		else
			sess->op = AESNI_GMAC_OP_VERIFY;

		key_length = auth_xform->auth.key.length;
		key = auth_xform->auth.key.data;
		sess->req_digest_length = auth_xform->auth.digest_length;

	/* AES-GCM */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = xform;

		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
			AESNI_GCM_LOG(ERR, "The only combined operation "
					"supported is AES GCM");
			return -ENOTSUP;
		}

		/* Set IV parameters */
		sess->iv.offset = aead_xform->aead.iv.offset;
		sess->iv.length = aead_xform->aead.iv.length;

		/* Select Crypto operation */
		if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
		/* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
		else
			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;

		key_length = aead_xform->aead.key.length;
		key = aead_xform->aead.key.data;

		sess->aad_length = aead_xform->aead.aad_length;
		sess->req_digest_length = aead_xform->aead.digest_length;
	} else {
		AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
		return -ENOTSUP;
	}

	/* IV check */
	if (sess->iv.length != 16 && sess->iv.length != 12 &&
			sess->iv.length != 0) {
		AESNI_GCM_LOG(ERR, "Wrong IV length");
		return -EINVAL;
	}

	/* Check key length and calculate GCM pre-compute. */
	switch (key_length) {
	case 16:
		sess->key = GCM_KEY_128;
		break;
	case 24:
		sess->key = GCM_KEY_192;
		break;
	case 32:
		sess->key = GCM_KEY_256;
		break;
	default:
		AESNI_GCM_LOG(ERR, "Invalid key length");
		return -EINVAL;
	}

	/* setup session handlers */
	set_func_ops(sess, &gcm_ops[sess->key]);

	/* pre-generate key */
	gcm_ops[sess->key].pre(key, &sess->gdata_key);
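
	/*
	 * The pre() handler runs the multi-buffer library's GCM
	 * precomputation (AES key expansion plus GHASH key derivation) once
	 * per session, so the per-operation init/update/finalize calls can
	 * reuse sess->gdata_key without touching the raw key again.
	 */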

	/* Digest check */
	if (sess->req_digest_length > 16) {
		AESNI_GCM_LOG(ERR, "Invalid digest length");
		return -EINVAL;
	}
	/*
	 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
	 * in version 0.50 and sizes of 8, 12 and 16 bytes,
	 * in version 0.49.
	 * If size requested is different, generate the full digest
	 * (16 bytes) in a temporary location and then memcpy
	 * the requested number of bytes.
	 */
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
	if (sess->req_digest_length < 4)
#else
	if (sess->req_digest_length != 16 &&
			sess->req_digest_length != 12 &&
			sess->req_digest_length != 8)
#endif
		sess->gen_digest_length = 16;
	else
		sess->gen_digest_length = sess->req_digest_length;

	return 0;
}
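
/*
 * For illustration (not part of the driver): a minimal AEAD xform that
 * this parser accepts, assuming a 16-byte key in key_data and a 12-byte
 * IV placed in the op's private area at offset IV_OFFSET (a hypothetical
 * application-chosen constant):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */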

/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_gcm_session *sess = NULL;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(sym_op->session != NULL))
			sess = (struct aesni_gcm_session *)
					get_sym_session_private_data(
					sym_op->session,
					cryptodev_driver_id);
	} else {
		void *_sess;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct aesni_gcm_session *)_sess_private_data;

		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
				sess, sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(sym_op->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}
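
/*
 * Note on the session-less path above: the generic session header and the
 * driver-private session data come from two separate mempools (qp->sess_mp
 * and qp->sess_mp_priv); both objects are returned to their pools in
 * handle_completed_gcm_crypto_op() once the op completes.
 */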

/**
 * Process a crypto operation, calling
 * the GCM API from the multi buffer library.
 *
 * @param	qp		queue pair
 * @param	op		symmetric crypto operation
 * @param	session		GCM session
 *
 * @return
 * - 0 on success
 * - negative number on error
 */
static int
process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t offset, data_offset, data_length;
	uint32_t part_len, total_len, data_len;
	uint8_t *tag;
	unsigned int oop = 0;

	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
		offset = sym_op->aead.data.offset;
		data_offset = offset;
		data_length = sym_op->aead.data.length;
	} else {
		offset = sym_op->auth.data.offset;
		data_offset = offset;
		data_length = sym_op->auth.data.length;
	}

	RTE_ASSERT(m_src != NULL);

	/* Find the first segment that holds the start of the data. */
	while (offset >= m_src->data_len && data_length != 0) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	data_len = m_src->data_len - offset;
	part_len = (data_len < data_length) ? data_len :
			data_length;

	RTE_ASSERT((sym_op->m_dst == NULL) ||
			((sym_op->m_dst != NULL) &&
			rte_pktmbuf_is_contiguous(sym_op->m_dst)));

	/* In-place */
	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
		dst = src;
	/* Out-of-place */
	else {
		oop = 1;
		/* Segmented destination buffer is not supported if operation is
		 * Out-of-place */
		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
				data_offset);
	}

	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
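
	/*
	 * Dispatch on the session operation type. Each branch below walks
	 * any remaining mbuf segments, feeding them to the multi-buffer
	 * library's update handler before finalizing the tag.
	 */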
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
		qp->ops[session->key].init(&session->gdata_key,
				&qp->gdata_ctx, iv_ptr,
				sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		qp->ops[session->key].update_enc(&session->gdata_key,
				&qp->gdata_ctx, dst, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			qp->ops[session->key].update_enc(&session->gdata_key,
					&qp->gdata_ctx, dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		if (session->req_digest_length != session->gen_digest_length)
			tag = qp->temp_digest;
		else
			tag = sym_op->aead.digest.data;

		qp->ops[session->key].finalize_enc(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
		qp->ops[session->key].init(&session->gdata_key,
				&qp->gdata_ctx, iv_ptr,
				sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		qp->ops[session->key].update_dec(&session->gdata_key,
				&qp->gdata_ctx, dst, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			qp->ops[session->key].update_dec(&session->gdata_key,
					&qp->gdata_ctx, dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		/* Always generate the full tag; compared in post-processing. */
		tag = qp->temp_digest;
		qp->ops[session->key].finalize_dec(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
		qp->ops[session->key].gmac_init(&session->gdata_key,
				&qp->gdata_ctx, iv_ptr,
				session->iv.length);

		qp->ops[session->key].gmac_update(&session->gdata_key,
				&qp->gdata_ctx, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			qp->ops[session->key].gmac_update(&session->gdata_key,
					&qp->gdata_ctx, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		if (session->req_digest_length != session->gen_digest_length)
			tag = qp->temp_digest;
		else
			tag = sym_op->auth.digest.data;

		qp->ops[session->key].gmac_finalize(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	} else { /* AESNI_GMAC_OP_VERIFY */
		qp->ops[session->key].gmac_init(&session->gdata_key,
				&qp->gdata_ctx, iv_ptr,
				session->iv.length);

		qp->ops[session->key].gmac_update(&session->gdata_key,
				&qp->gdata_ctx, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			qp->ops[session->key].gmac_update(&session->gdata_key,
					&qp->gdata_ctx, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		tag = qp->temp_digest;
		qp->ops[session->key].gmac_finalize(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
#else
	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
		qp->ops[session->key].init(&session->gdata_key,
				&qp->gdata_ctx, iv_ptr, src,
				(uint64_t)data_length);
		if (session->req_digest_length != session->gen_digest_length)
			tag = qp->temp_digest;
		else
			tag = sym_op->auth.digest.data;
		qp->ops[session->key].finalize_enc(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	} else { /* AESNI_GMAC_OP_VERIFY */
		qp->ops[session->key].init(&session->gdata_key,
				&qp->gdata_ctx, iv_ptr, src,
				(uint64_t)data_length);

		/*
		 * Generate always 16 bytes and later compare only
		 * the bytes passed.
		 */
		tag = qp->temp_digest;
		qp->ops[session->key].finalize_enc(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	}
#endif

	return 0;
}
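
/*
 * Pre-0.54 versions of the multi-buffer library have no dedicated GMAC
 * API, so the fallback above computes GMAC as AES-GCM with the whole
 * input passed as AAD and a zero-length ciphertext, which is the
 * standard GMAC construction.
 */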

static inline void
aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, uint8_t *digest)
{
	if (s->req_digest_length != s->gen_digest_length) {
		uint8_t tmpdigest[s->gen_digest_length];

		s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
				s->gen_digest_length);
		memcpy(digest, tmpdigest, s->req_digest_length);
	} else {
		s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
				s->gen_digest_length);
	}

	return 0;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, uint8_t *digest)
{
	uint8_t tmpdigest[s->gen_digest_length];

	s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
			s->gen_digest_length);

	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
			EBADMSG;
}

static inline void
aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
		void *iv, void *aad)
{
	uint32_t i;

	/* init crypto operation */
	s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
			(uint64_t)s->aad_length);

	/* update with sgl data */
	for (i = 0; i < sgl->num; i++) {
		struct rte_crypto_vec *vec = &sgl->vec[i];

		s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
				vec->len);
	}
}
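
/*
 * The update handler above is given vec->base as both destination and
 * source, i.e. the CPU crypto path always ciphers each segment in place.
 */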

static inline void
aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
		void *iv)
{
	s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
			sgl->vec[0].len);
}

static inline uint32_t
aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i], vec->aad[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i], vec->aad[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		if (vec->sgl[i].num != 1) {
			vec->status[i] = ENOTSUP;
			continue;
		}

		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		if (vec->sgl[i].num != 1) {
			vec->status[i] = ENOTSUP;
			continue;
		}

		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

/** Process CPU crypto bulk operations */
uint32_t
aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
	struct rte_cryptodev_sym_session *sess,
	__rte_unused union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	void *sess_priv;
	struct aesni_gcm_session *s;
	struct gcm_context_data gdata_ctx;

	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
	if (unlikely(sess_priv == NULL)) {
		aesni_gcm_fill_error_code(vec, EINVAL);
		return 0;
	}

	s = sess_priv;
	switch (s->op) {
	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
	case AESNI_GMAC_OP_GENERATE:
		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
	case AESNI_GMAC_OP_VERIFY:
		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
	default:
		aesni_gcm_fill_error_code(vec, EINVAL);
		return 0;
	}
}
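
/*
 * Applications normally reach this handler through the generic API rather
 * than calling it directly. A minimal sketch, assuming dev_id and a fully
 * populated rte_crypto_sym_vec (iv, aad, digest and sgl arrays filled in
 * by the caller):
 *
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *			ofs, &vec);
 *	// done == vec.num when every element processed successfully;
 *	// per-element results are left in vec.status[].
 */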

/**
 * Post-process a completed GCM crypto operation: set the op status and
 * either verify the digest (decryption/verify) or copy out a truncated
 * digest (encryption/generate).
 *
 * @param qp		queue pair
 * @param op		crypto operation
 * @param session	GCM session
 */
static void
post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Verify digest if required */
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
			session->op == AESNI_GMAC_OP_VERIFY) {
		uint8_t *digest;
		uint8_t *tag = qp->temp_digest;

		if (session->op == AESNI_GMAC_OP_VERIFY)
			digest = op->sym->auth.digest.data;
		else
			digest = op->sym->aead.digest.data;

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
		rte_hexdump(stdout, "auth tag (orig):",
				digest, session->req_digest_length);
		rte_hexdump(stdout, "auth tag (calc):",
				tag, session->req_digest_length);
#endif

		if (memcmp(tag, digest, session->req_digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		if (session->req_digest_length != session->gen_digest_length) {
			if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
				memcpy(op->sym->aead.digest.data, qp->temp_digest,
						session->req_digest_length);
			else
				memcpy(op->sym->auth.digest.data, qp->temp_digest,
						session->req_digest_length);
		}
	}
}

/**
 * Process a completed GCM request.
 *
 * @param qp		queue pair to process
 * @param op		crypto operation
 * @param sess		GCM session
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op,
		struct aesni_gcm_session *sess)
{
	post_process_gcm_crypto_op(qp, op, sess);

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_gcm_session));
		memset(op->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
				op->sym->session));
		rte_mempool_put(qp->sess_mp_priv, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}
}

static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_session *sess;
	struct aesni_gcm_qp *qp = queue_pair;

	int retval = 0;
	unsigned int i, nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {

		sess = aesni_gcm_get_session(qp, ops[i]);
		if (unlikely(sess == NULL)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		retval = process_gcm_crypto_op(qp, ops[i], sess);
		if (retval < 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		handle_completed_gcm_crypto_op(qp, ops[i], sess);
	}

	qp->qp_stats.dequeued_count += i;

	return i;
}

static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_qp *qp = queue_pair;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}
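
/*
 * Note the split above: enqueue_burst only stages ops on the qp ring;
 * all GCM processing happens in dequeue_burst, on the caller's lcore,
 * as each op is pulled back off the ring.
 */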

static int aesni_gcm_remove(struct rte_vdev_device *vdev);

static int
aesni_gcm_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_gcm_private *internals;
	enum aesni_gcm_vector_mode vector_mode;
	MB_MGR *mb_mgr;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		AESNI_GCM_LOG(ERR, "driver %s: create failed",
			init_params->name);
		return -ENODEV;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_GCM_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_GCM_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_GCM_AVX;
	else
		vector_mode = RTE_AESNI_GCM_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_gcm_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

	/* Check CPU for support for AES instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
	else
		AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");

	mb_mgr = alloc_mb_mgr(0);
	if (mb_mgr == NULL)
		goto error_exit;

	switch (vector_mode) {
	case RTE_AESNI_GCM_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		init_mb_mgr_sse(mb_mgr);
		break;
	case RTE_AESNI_GCM_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		init_mb_mgr_avx(mb_mgr);
		break;
	case RTE_AESNI_GCM_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		init_mb_mgr_avx2(mb_mgr);
		break;
	case RTE_AESNI_GCM_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		init_mb_mgr_avx512(mb_mgr);
		break;
	default:
		AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
		goto error_exit;
	}

	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->mb_mgr = mb_mgr;

	/* Set arch independent function pointers, based on key size */
	internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
	internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
	internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
	internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
	internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
#endif

	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
	internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
	internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
	internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
#endif

	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
	internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
	internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
	internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
#endif

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;

#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());
#else
	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
#endif

	return 0;

error_exit:
	AESNI_GCM_LOG(ERR, "driver %s: create failed", name);

	if (mb_mgr)
		free_mb_mgr(mb_mgr);
	rte_cryptodev_pmd_destroy(dev);

	return -1;
}

static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct aesni_gcm_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);

	return aesni_gcm_create(name, vdev, &init_params);
}

static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	struct aesni_gcm_private *internals;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = aesni_gcm_remove
};

static struct cryptodev_driver aesni_gcm_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(aesni_gcm_logtype_driver, pmd.crypto.aesni_gcm, NOTICE);
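
/*
 * Example (illustrative): instantiating this PMD from the EAL command
 * line, with application-chosen queue-pair and socket values:
 *
 *	./app --vdev "crypto_aesni_gcm,max_nb_queue_pairs=2,socket_id=0"
 */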