1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_hexdump.h>
7 #include <rte_cryptodev.h>
8 #include <rte_cryptodev_pmd.h>
9 #include <rte_bus_vdev.h>
10 #include <rte_malloc.h>
11 #include <rte_cpuflags.h>
12 #include <rte_byteorder.h>
14 #include "aesni_gcm_pmd_private.h"
/* Driver ID assigned by RTE_PMD_REGISTER_CRYPTO_DRIVER at the bottom of this
 * file; used to index this PMD's private data inside generic sym sessions. */
16 static uint8_t cryptodev_driver_id;
18 /* setup session handlers */
/*
 * Bind the per-key-size GCM function pointers (pre/init/cipher/update/
 * finalize) into the session, selected by the session's configured operation.
 * NOTE(review): this listing elides lines (return type, braces, the switch
 * header and break statements) — the case labels below presumably belong to a
 * switch on s->op; confirm against the full file.
 */
20 set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
22 s->ops.pre = gcm_ops->pre;
23 s->ops.init = gcm_ops->init;
/* AEAD encrypt: use encrypt-direction primitives. */
26 case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
27 s->ops.cipher = gcm_ops->enc;
28 s->ops.update = gcm_ops->update_enc;
29 s->ops.finalize = gcm_ops->finalize_enc;
/* AEAD decrypt: use decrypt-direction primitives. */
31 case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
32 s->ops.cipher = gcm_ops->dec;
33 s->ops.update = gcm_ops->update_dec;
34 s->ops.finalize = gcm_ops->finalize_dec;
/* GMAC (auth-only): both generate and verify produce the tag with the
 * encrypt-direction finalize; verify compares tags later (see
 * post_process_gcm_crypto_op / aesni_gcm_sgl_op_finalize_decryption). */
36 case AESNI_GMAC_OP_GENERATE:
37 case AESNI_GMAC_OP_VERIFY:
38 s->ops.finalize = gcm_ops->finalize_enc;
43 /** Parse crypto xform chain and set private session parameters */
/*
 * Validates the xform (AES-GMAC auth-only or AES-GCM AEAD), records IV/key/
 * digest/AAD parameters into @sess, binds function pointers and pre-computes
 * the GCM key schedule. Returns 0 on success, negative on bad parameters
 * (the return statements themselves are elided from this listing).
 * NOTE(review): assignments such as "auth_xform = xform;" /
 * "aead_xform = xform;" and the key/key_length declarations are among the
 * elided lines.
 */
45 aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
46 struct aesni_gcm_session *sess,
47 const struct rte_crypto_sym_xform *xform)
49 const struct rte_crypto_sym_xform *auth_xform;
50 const struct rte_crypto_sym_xform *aead_xform;
/* Auth-only path: only AES-GMAC is supported. */
55 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
57 if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
58 AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
59 "authentication only algorithm");
62 /* Set IV parameters */
63 sess->iv.offset = auth_xform->auth.iv.offset;
64 sess->iv.length = auth_xform->auth.iv.length;
66 /* Select Crypto operation */
67 if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
68 sess->op = AESNI_GMAC_OP_GENERATE;
70 sess->op = AESNI_GMAC_OP_VERIFY;
72 key_length = auth_xform->auth.key.length;
73 key = auth_xform->auth.key.data;
74 sess->req_digest_length = auth_xform->auth.digest_length;
/* Combined (AEAD) path: only AES-GCM is supported. */
77 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
80 if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
81 AESNI_GCM_LOG(ERR, "The only combined operation "
82 "supported is AES GCM");
86 /* Set IV parameters */
87 sess->iv.offset = aead_xform->aead.iv.offset;
88 sess->iv.length = aead_xform->aead.iv.length;
90 /* Select Crypto operation */
91 if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
92 sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
93 /* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
95 sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
97 key_length = aead_xform->aead.key.length;
98 key = aead_xform->aead.key.data;
100 sess->aad_length = aead_xform->aead.aad_length;
101 sess->req_digest_length = aead_xform->aead.digest_length;
103 AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
/* IV must be 96-bit (standard GCM), 128-bit, or absent. */
108 if (sess->iv.length != 16 && sess->iv.length != 12 &&
109 sess->iv.length != 0) {
110 AESNI_GCM_LOG(ERR, "Wrong IV length");
114 /* Check key length and calculate GCM pre-compute. */
115 switch (key_length) {
117 sess->key = GCM_KEY_128;
120 sess->key = GCM_KEY_192;
123 sess->key = GCM_KEY_256;
126 AESNI_GCM_LOG(ERR, "Invalid key length");
130 /* setup session handlers */
131 set_func_ops(sess, &gcm_ops[sess->key]);
133 /* pre-generate key */
134 gcm_ops[sess->key].pre(key, &sess->gdata_key);
/* Digest can never exceed the 16-byte GCM tag. */
137 if (sess->req_digest_length > 16) {
138 AESNI_GCM_LOG(ERR, "Invalid digest length");
142 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
143 * in version 0.50 and sizes of 8, 12 and 16 bytes,
145 * If size requested is different, generate the full digest
146 * (16 bytes) in a temporary location and then memcpy
147 * the requested number of bytes.
149 #if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
150 if (sess->req_digest_length < 4)
/* Odd sizes: generate a full 16-byte tag, truncate on copy-out. */
152 if (sess->req_digest_length != 16 &&
153 sess->req_digest_length != 12 &&
154 sess->req_digest_length != 8)
156 sess->gen_digest_length = 16;
158 sess->gen_digest_length = sess->req_digest_length;
163 /** Get gcm session */
/*
 * Resolve the GCM session for @op: for WITH_SESSION ops, fetch this driver's
 * private data from the attached session; for sessionless ops, allocate a
 * session object + private data from the queue pair's mempools, configure it
 * from op->sym->xform, and attach it to the op. Returns NULL on failure (the
 * caller marks the op INVALID_SESSION).
 */
164 static struct aesni_gcm_session *
165 aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
167 struct aesni_gcm_session *sess = NULL;
168 struct rte_crypto_sym_op *sym_op = op->sym;
170 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
171 if (likely(sym_op->session != NULL))
172 sess = (struct aesni_gcm_session *)
173 get_sym_session_private_data(
175 cryptodev_driver_id);
/* Sessionless: build a temporary session from the op's xform chain. */
178 void *_sess_private_data = NULL;
180 if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
183 if (rte_mempool_get(qp->sess_mp_priv,
184 (void **)&_sess_private_data))
187 sess = (struct aesni_gcm_session *)_sess_private_data;
/* On parameter failure, return both objects to their pools. */
189 if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
190 sess, sym_op->xform) != 0)) {
191 rte_mempool_put(qp->sess_mp, _sess)
192 rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
195 sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
196 set_sym_session_private_data(sym_op->session,
197 cryptodev_driver_id, _sess_private_data);
200 if (unlikely(sess == NULL))
201 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
207 * Process a crypto operation, calling
208 * the GCM API from the multi buffer library.
210 * @param qp queue pair
211 * @param op symmetric crypto operation
212 * @param session GCM session
/*
 * Walks the (possibly segmented) source mbuf chain and drives the
 * init/update/finalize sequence for the session's operation. Segmented
 * destination buffers are rejected (RTE_ASSERTs below); out-of-place is
 * supported only with a contiguous destination.
 * NOTE(review): several lines are elided here (declarations of src/dst/
 * iv_ptr/tag, m_src = m_src->next advances, braces, final return) — comments
 * describe only what the visible lines establish.
 */
218 process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
219 struct aesni_gcm_session *session)
223 struct rte_crypto_sym_op *sym_op = op->sym;
224 struct rte_mbuf *m_src = sym_op->m_src;
225 uint32_t offset, data_offset, data_length;
226 uint32_t part_len, total_len, data_len;
228 unsigned int oop = 0;
/* AEAD ops take region from aead.data; GMAC ops from auth.data. */
230 if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
231 session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
232 offset = sym_op->aead.data.offset;
233 data_offset = offset;
234 data_length = sym_op->aead.data.length;
236 offset = sym_op->auth.data.offset;
237 data_offset = offset;
238 data_length = sym_op->auth.data.length;
241 RTE_ASSERT(m_src != NULL);
/* Skip over leading segments until the data offset falls inside m_src. */
243 while (offset >= m_src->data_len && data_length != 0) {
244 offset -= m_src->data_len;
247 RTE_ASSERT(m_src != NULL);
250 src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
/* First chunk is bounded by the remainder of this segment. */
252 data_len = m_src->data_len - offset;
253 part_len = (data_len < data_length) ? data_len :
256 RTE_ASSERT((sym_op->m_dst == NULL) ||
257 ((sym_op->m_dst != NULL) &&
258 rte_pktmbuf_is_contiguous(sym_op->m_dst)));
/* In-place when no distinct destination mbuf is supplied. */
261 if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
266 /* Segmented destination buffer is not supported if operation is
268 RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
269 dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
/* IV lives in the op's private area at the session-configured offset. */
273 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
276 if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
277 qp->ops[session->key].init(&session->gdata_key,
280 sym_op->aead.aad.data,
281 (uint64_t)session->aad_length);
283 qp->ops[session->key].update_enc(&session->gdata_key,
284 &qp->gdata_ctx, dst, src,
/* Remaining bytes after the first segment's chunk. */
286 total_len = data_length - part_len;
291 RTE_ASSERT(m_src != NULL);
293 src = rte_pktmbuf_mtod(m_src, uint8_t *);
298 part_len = (m_src->data_len < total_len) ?
299 m_src->data_len : total_len;
301 qp->ops[session->key].update_enc(&session->gdata_key,
302 &qp->gdata_ctx, dst, src,
304 total_len -= part_len;
/* Use scratch tag when requested digest is shorter than generated. */
307 if (session->req_digest_length != session->gen_digest_length)
308 tag = qp->temp_digest;
310 tag = sym_op->aead.digest.data;
312 qp->ops[session->key].finalize_enc(&session->gdata_key,
315 session->gen_digest_length);
316 } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
317 qp->ops[session->key].init(&session->gdata_key,
320 sym_op->aead.aad.data,
321 (uint64_t)session->aad_length);
323 qp->ops[session->key].update_dec(&session->gdata_key,
324 &qp->gdata_ctx, dst, src,
326 total_len = data_length - part_len;
331 RTE_ASSERT(m_src != NULL);
333 src = rte_pktmbuf_mtod(m_src, uint8_t *);
338 part_len = (m_src->data_len < total_len) ?
339 m_src->data_len : total_len;
341 qp->ops[session->key].update_dec(&session->gdata_key,
345 total_len -= part_len;
/* Decrypt always computes into scratch; verification happens later in
 * post_process_gcm_crypto_op. */
348 tag = qp->temp_digest;
349 qp->ops[session->key].finalize_dec(&session->gdata_key,
352 session->gen_digest_length);
353 } else if (session->op == AESNI_GMAC_OP_GENERATE) {
354 qp->ops[session->key].init(&session->gdata_key,
358 (uint64_t)data_length);
359 if (session->req_digest_length != session->gen_digest_length)
360 tag = qp->temp_digest;
362 tag = sym_op->auth.digest.data;
363 qp->ops[session->key].finalize_enc(&session->gdata_key,
366 session->gen_digest_length);
367 } else { /* AESNI_GMAC_OP_VERIFY */
368 qp->ops[session->key].init(&session->gdata_key,
372 (uint64_t)data_length);
375 * Generate always 16 bytes and later compare only
378 tag = qp->temp_digest;
379 qp->ops[session->key].finalize_enc(&session->gdata_key,
382 session->gen_digest_length);
/* Set every per-op status in the CPU-crypto vector to @errnum (bulk-fail helper). */
389 aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
393 for (i = 0; i < vec->num; i++)
394 vec->status[i] = errnum;
398 static inline int32_t
/*
 * Finalize an encrypt/GMAC-generate SGL op: when the requested digest is
 * shorter than the generated one, compute the full tag into a stack buffer
 * (VLA sized by gen_digest_length) and copy only req_digest_length bytes out;
 * otherwise write the tag straight to @digest. Return value lines are elided
 * from this listing (presumably returns 0).
 */
399 aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
400 struct gcm_context_data *gdata_ctx, uint8_t *digest)
402 if (s->req_digest_length != s->gen_digest_length) {
403 uint8_t tmpdigest[s->gen_digest_length];
405 s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
406 s->gen_digest_length);
407 memcpy(digest, tmpdigest, s->req_digest_length);
409 s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
410 s->gen_digest_length);
416 static inline int32_t
/*
 * Finalize a decrypt/GMAC-verify SGL op: compute the tag into a stack buffer
 * and compare the first req_digest_length bytes against the supplied @digest.
 * Returns 0 on match; the non-zero failure value is on an elided line
 * (presumably EBADMSG).
 */
417 aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
418 struct gcm_context_data *gdata_ctx, uint8_t *digest)
420 uint8_t tmpdigest[s->gen_digest_length];
422 s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
423 s->gen_digest_length);
425 return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
/*
 * Run GCM init + one update per SGL segment, in place (vec->base used as
 * both src and dst). Caller finalizes separately.
 */
430 aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
431 struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
436 /* init crypto operation */
437 s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
438 (uint64_t)s->aad_length);
440 /* update with sgl data */
441 for (i = 0; i < sgl->num; i++) {
442 struct rte_crypto_vec *vec = &sgl->vec[i];
/* In-place: segment base is both source and destination. */
444 s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
/*
 * GMAC over a single-segment SGL: the first segment's data is fed to init()
 * as the authenticated data (callers reject multi-segment SGLs beforehand).
 */
450 aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
451 struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
454 s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
458 static inline uint32_t
/*
 * CPU-crypto bulk encrypt: process each SGL in @vec, storing per-op status
 * and counting successes. Returns the processed count (return line elided).
 * NOTE(review): the "processed = 0;" initializer is on an elided line —
 * confirm against the full file.
 */
459 aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
460 struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
462 uint32_t i, processed;
465 for (i = 0; i < vec->num; ++i) {
466 aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
467 &vec->sgl[i], vec->iv[i], vec->aad[i]);
468 vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
469 gdata_ctx, vec->digest[i]);
470 processed += (vec->status[i] == 0);
476 static inline uint32_t
/*
 * CPU-crypto bulk decrypt: mirror of aesni_gcm_sgl_encrypt, with tag
 * verification done in aesni_gcm_sgl_op_finalize_decryption. Returns the
 * count of ops whose status is 0 (return line elided).
 */
477 aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
478 struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
480 uint32_t i, processed;
483 for (i = 0; i < vec->num; ++i) {
484 aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
485 &vec->sgl[i], vec->iv[i], vec->aad[i]);
486 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
487 gdata_ctx, vec->digest[i]);
488 processed += (vec->status[i] == 0);
494 static inline uint32_t
/*
 * CPU-crypto bulk GMAC generate. Multi-segment SGLs are not supported by the
 * GMAC path: such ops are marked ENOTSUP and skipped (the continue statement
 * is on an elided line).
 */
495 aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
496 struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
498 uint32_t i, processed;
501 for (i = 0; i < vec->num; ++i) {
502 if (vec->sgl[i].num != 1) {
503 vec->status[i] = ENOTSUP;
507 aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
508 &vec->sgl[i], vec->iv[i]);
509 vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
510 gdata_ctx, vec->digest[i]);
511 processed += (vec->status[i] == 0);
517 static inline uint32_t
/*
 * CPU-crypto bulk GMAC verify: like aesni_gmac_sgl_generate but the tag is
 * recomputed and compared via aesni_gcm_sgl_op_finalize_decryption.
 * Multi-segment SGLs are marked ENOTSUP.
 */
518 aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
519 struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
521 uint32_t i, processed;
524 for (i = 0; i < vec->num; ++i) {
525 if (vec->sgl[i].num != 1) {
526 vec->status[i] = ENOTSUP;
530 aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
531 &vec->sgl[i], vec->iv[i]);
532 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
533 gdata_ctx, vec->digest[i]);
534 processed += (vec->status[i] == 0);
540 /** Process CPU crypto bulk operations */
/*
 * Synchronous (CPU crypto) entry point: look up this driver's session private
 * data and dispatch the whole vector to the matching bulk helper. On a bad
 * session or unknown op, every status is set to EINVAL (the `s = sess_priv`
 * assignment and `switch (s->op)` header are on elided lines).
 */
542 aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
543 struct rte_cryptodev_sym_session *sess,
544 __rte_unused union rte_crypto_sym_ofs ofs,
545 struct rte_crypto_sym_vec *vec)
548 struct aesni_gcm_session *s;
549 struct gcm_context_data gdata_ctx;
551 sess_priv = get_sym_session_private_data(sess, dev->driver_id);
552 if (unlikely(sess_priv == NULL)) {
553 aesni_gcm_fill_error_code(vec, EINVAL);
559 case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
560 return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
561 case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
562 return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
563 case AESNI_GMAC_OP_GENERATE:
564 return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
565 case AESNI_GMAC_OP_VERIFY:
566 return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
/* default: unrecognized session op. */
568 aesni_gcm_fill_error_code(vec, EINVAL);
574 * Process a completed job and return rte_mbuf which job processed
576 * @param job JOB_AES_HMAC job to process
579 * - Returns processed mbuf which is trimmed of output digest used in
580 * verification of supplied digest in the case of a HASH_CIPHER operation
581 * - Returns NULL on invalid job
/*
 * NOTE(review): the doc block above looks copied from the aesni_mb PMD
 * (mentions JOB_AES_HMAC / returned mbuf); this function actually just sets
 * op->status — verify the tag for decrypt/verify ops, or copy the truncated
 * digest out for encrypt/generate ops with a shortened digest.
 */
584 post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
585 struct rte_crypto_op *op,
586 struct aesni_gcm_session *session)
588 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
590 /* Verify digest if required */
591 if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
592 session->op == AESNI_GMAC_OP_VERIFY) {
/* Computed tag was left in the QP scratch area by process_gcm_crypto_op. */
595 uint8_t *tag = qp->temp_digest;
597 if (session->op == AESNI_GMAC_OP_VERIFY)
598 digest = op->sym->auth.digest.data;
600 digest = op->sym->aead.digest.data;
602 #ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
603 rte_hexdump(stdout, "auth tag (orig):",
604 digest, session->req_digest_length);
605 rte_hexdump(stdout, "auth tag (calc):",
606 tag, session->req_digest_length);
/* Only the requested (possibly truncated) length is compared. */
609 if (memcmp(tag, digest, session->req_digest_length) != 0)
610 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Generate path with truncated digest: copy out req_digest_length bytes
 * from the 16-byte scratch tag. */
612 if (session->req_digest_length != session->gen_digest_length) {
613 if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
614 memcpy(op->sym->aead.digest.data, qp->temp_digest,
615 session->req_digest_length);
617 memcpy(op->sym->auth.digest.data, qp->temp_digest,
618 session->req_digest_length);
624 * Process a completed GCM request
626 * @param qp Queue Pair to process
627 * @param op Crypto operation
628 * @param job JOB_AES_HMAC job
631 * - Number of processed jobs
/*
 * Post-process the op (status / digest handling) and, for sessionless ops,
 * scrub and return the temporary session objects to their mempools.
 */
634 handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
635 struct rte_crypto_op *op,
636 struct aesni_gcm_session *sess)
638 post_process_gcm_crypto_op(qp, op, sess);
640 /* Free session if a session-less crypto op */
641 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
/* Zero both objects before returning them so no key material leaks
 * to the next allocation from these pools. */
642 memset(sess, 0, sizeof(struct aesni_gcm_session));
643 memset(op->sym->session, 0,
644 rte_cryptodev_sym_get_existing_header_session_size(
646 rte_mempool_put(qp->sess_mp_priv, sess);
647 rte_mempool_put(qp->sess_mp, op->sym->session);
648 op->sym->session = NULL;
/*
 * Dequeue burst: in this PMD the actual crypto work happens at dequeue time —
 * ops are pulled off the internal ring (where enqueue placed them), processed
 * synchronously, then returned to the caller. Error ops bump
 * dequeue_err_count; the return statement is on an elided line.
 */
653 aesni_gcm_pmd_dequeue_burst(void *queue_pair,
654 struct rte_crypto_op **ops, uint16_t nb_ops)
656 struct aesni_gcm_session *sess;
657 struct aesni_gcm_qp *qp = queue_pair;
660 unsigned int i, nb_dequeued;
662 nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
663 (void **)ops, nb_ops, NULL);
665 for (i = 0; i < nb_dequeued; i++) {
667 sess = aesni_gcm_get_session(qp, ops[i]);
668 if (unlikely(sess == NULL)) {
669 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
670 qp->qp_stats.dequeue_err_count++;
674 retval = process_gcm_crypto_op(qp, ops[i], sess);
676 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
677 qp->qp_stats.dequeue_err_count++;
681 handle_completed_gcm_crypto_op(qp, ops[i], sess);
684 qp->qp_stats.dequeued_count += i;
/*
 * Enqueue burst: simply stage ops on the QP's internal ring; all processing
 * is deferred to aesni_gcm_pmd_dequeue_burst. Returns nb_enqueued (return
 * line elided).
 */
690 aesni_gcm_pmd_enqueue_burst(void *queue_pair,
691 struct rte_crypto_op **ops, uint16_t nb_ops)
693 struct aesni_gcm_qp *qp = queue_pair;
695 unsigned int nb_enqueued;
697 nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
698 (void **)ops, nb_ops, NULL);
699 qp->qp_stats.enqueued_count += nb_enqueued;
/* Forward declaration; defined below after aesni_gcm_create. */
704 static int aesni_gcm_remove(struct rte_vdev_device *vdev);
/*
 * Create the vdev crypto device: detect the best vector ISA, allocate and
 * initialize an intel-ipsec-mb manager, advertise feature flags, and wire
 * the per-key-size GCM function tables into the device private data.
 * Error-path labels/returns are on elided lines.
 */
707 aesni_gcm_create(const char *name,
708 struct rte_vdev_device *vdev,
709 struct rte_cryptodev_pmd_init_params *init_params)
711 struct rte_cryptodev *dev;
712 struct aesni_gcm_private *internals;
713 enum aesni_gcm_vector_mode vector_mode;
716 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
718 AESNI_GCM_LOG(ERR, "driver %s: create failed",
723 /* Check CPU for supported vector instruction set */
724 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
725 vector_mode = RTE_AESNI_GCM_AVX512;
726 else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
727 vector_mode = RTE_AESNI_GCM_AVX2;
728 else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
729 vector_mode = RTE_AESNI_GCM_AVX;
731 vector_mode = RTE_AESNI_GCM_SSE;
733 dev->driver_id = cryptodev_driver_id;
734 dev->dev_ops = rte_aesni_gcm_pmd_ops;
736 /* register rx/tx burst functions for data path */
737 dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
738 dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
740 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
741 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
742 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
743 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
744 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
745 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
746 RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
748 /* Check CPU for support for AES instruction set */
749 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
750 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
752 AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");
754 mb_mgr = alloc_mb_mgr(0);
/* Initialize the MB manager's arch-specific code paths for the chosen ISA. */
758 switch (vector_mode) {
759 case RTE_AESNI_GCM_SSE:
760 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
761 init_mb_mgr_sse(mb_mgr);
763 case RTE_AESNI_GCM_AVX:
764 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
765 init_mb_mgr_avx(mb_mgr);
767 case RTE_AESNI_GCM_AVX2:
768 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
769 init_mb_mgr_avx2(mb_mgr);
/* NOTE(review): the AVX512 case ORs in RTE_CRYPTODEV_FF_CPU_AVX2 — this
 * looks like it should be RTE_CRYPTODEV_FF_CPU_AVX512; confirm against
 * upstream DPDK before changing. */
771 case RTE_AESNI_GCM_AVX512:
772 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
773 init_mb_mgr_avx512(mb_mgr);
776 AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
780 internals = dev->data->dev_private;
782 internals->vector_mode = vector_mode;
783 internals->mb_mgr = mb_mgr;
785 /* Set arch independent function pointers, based on key size */
786 internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
787 internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
788 internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
789 internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
790 internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
791 internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
792 internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
793 internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
795 internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
796 internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
797 internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
798 internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
799 internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
800 internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
801 internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
802 internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
804 internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
805 internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
806 internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
807 internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
808 internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
809 internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
810 internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
811 internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
813 internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
815 #if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
816 AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
817 imb_get_version_str());
819 AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
/* Error path: tear down the partially-created device. */
828 rte_cryptodev_pmd_destroy(dev);
/*
 * vdev probe: build default init params, override them from the device's
 * argument string (e.g. max_nb_queue_pairs), then create the device.
 */
834 aesni_gcm_probe(struct rte_vdev_device *vdev)
836 struct rte_cryptodev_pmd_init_params init_params = {
838 sizeof(struct aesni_gcm_private),
840 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
843 const char *input_args;
845 name = rte_vdev_device_name(vdev);
848 input_args = rte_vdev_device_args(vdev);
849 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
851 return aesni_gcm_create(name, vdev, &init_params);
/*
 * vdev remove: look up the named device, free the intel-ipsec-mb manager
 * allocated in aesni_gcm_create, then destroy the cryptodev.
 */
855 aesni_gcm_remove(struct rte_vdev_device *vdev)
857 struct rte_cryptodev *cryptodev;
858 struct aesni_gcm_private *internals;
861 name = rte_vdev_device_name(vdev);
865 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
866 if (cryptodev == NULL)
869 internals = cryptodev->data->dev_private;
871 free_mb_mgr(internals->mb_mgr);
873 return rte_cryptodev_pmd_destroy(cryptodev);
/* Virtual-device driver registration: probe/remove hooks, legacy alias,
 * accepted devargs, crypto-driver registration (assigns
 * cryptodev_driver_id), and the PMD log type. */
876 static struct rte_vdev_driver aesni_gcm_pmd_drv = {
877 .probe = aesni_gcm_probe,
878 .remove = aesni_gcm_remove
881 static struct cryptodev_driver aesni_gcm_crypto_drv;
883 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
884 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
885 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
886 "max_nb_queue_pairs=<int> "
888 RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
889 cryptodev_driver_id);
890 RTE_LOG_REGISTER(aesni_gcm_logtype_driver, pmd.crypto.aesni_gcm, NOTICE);