/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>

#include "aesni_gcm_pmd_private.h"

int aesni_gcm_logtype_driver;

static uint8_t cryptodev_driver_id;

/* setup session handlers */
static void
set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
{
	s->ops.pre = gcm_ops->pre;
	s->ops.init = gcm_ops->init;

	switch (s->op) {
	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
		s->ops.cipher = gcm_ops->enc;
		s->ops.update = gcm_ops->update_enc;
		s->ops.finalize = gcm_ops->finalize_enc;
		break;
	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
		s->ops.cipher = gcm_ops->dec;
		s->ops.update = gcm_ops->update_dec;
		s->ops.finalize = gcm_ops->finalize_dec;
		break;
	case AESNI_GMAC_OP_GENERATE:
	case AESNI_GMAC_OP_VERIFY:
		s->ops.finalize = gcm_ops->finalize_enc;
		break;
	}
}

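/*
 * Note: both GMAC cases above use the encrypt-path finalize. GCM computes
 * the authentication tag identically for generate and verify; verification
 * is performed later by comparing the computed tag against the one supplied
 * with the operation (see post_process_gcm_crypto_op()).
 */
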
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
		struct aesni_gcm_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform;
	const struct rte_crypto_sym_xform *aead_xform;
	uint8_t key_length;
	const uint8_t *key;

	/* AES-GMAC */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = xform;
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
			AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
				"authentication only algorithm");
			return -ENOTSUP;
		}
		/* Set IV parameters */
		sess->iv.offset = auth_xform->auth.iv.offset;
		sess->iv.length = auth_xform->auth.iv.length;

		/* Select Crypto operation */
		if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
			sess->op = AESNI_GMAC_OP_GENERATE;
		else
			sess->op = AESNI_GMAC_OP_VERIFY;

		key_length = auth_xform->auth.key.length;
		key = auth_xform->auth.key.data;
		sess->req_digest_length = auth_xform->auth.digest_length;

	/* AES-GCM */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = xform;

		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
			AESNI_GCM_LOG(ERR, "The only combined operation "
					"supported is AES GCM");
			return -ENOTSUP;
		}

		/* Set IV parameters */
		sess->iv.offset = aead_xform->aead.iv.offset;
		sess->iv.length = aead_xform->aead.iv.length;

		/* Select Crypto operation */
		if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
		/* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
		else
			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;

		key_length = aead_xform->aead.key.length;
		key = aead_xform->aead.key.data;

		sess->aad_length = aead_xform->aead.aad_length;
		sess->req_digest_length = aead_xform->aead.digest_length;
	} else {
		AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
		return -ENOTSUP;
	}

	/* IV check */
	if (sess->iv.length != 16 && sess->iv.length != 12 &&
			sess->iv.length != 0) {
		AESNI_GCM_LOG(ERR, "Wrong IV length");
		return -EINVAL;
	}

	/* Check key length and calculate GCM pre-compute. */
	switch (key_length) {
	case 16:
		sess->key = GCM_KEY_128;
		break;
	case 24:
		sess->key = GCM_KEY_192;
		break;
	case 32:
		sess->key = GCM_KEY_256;
		break;
	default:
		AESNI_GCM_LOG(ERR, "Invalid key length");
		return -EINVAL;
	}

	/* setup session handlers */
	set_func_ops(sess, &gcm_ops[sess->key]);

	/* pre-generate key */
	gcm_ops[sess->key].pre(key, &sess->gdata_key);

	/* Digest check */
	if (sess->req_digest_length > 16) {
		AESNI_GCM_LOG(ERR, "Invalid digest length");
		return -EINVAL;
	}
	/*
	 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
	 * in version 0.50 and sizes of 8, 12 and 16 bytes,
	 * in version 0.49.
	 * If size requested is different, generate the full digest
	 * (16 bytes) in a temporary location and then memcpy
	 * the requested number of bytes.
	 */
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
	if (sess->req_digest_length < 4)
#else
	if (sess->req_digest_length != 16 &&
			sess->req_digest_length != 12 &&
			sess->req_digest_length != 8)
#endif
		sess->gen_digest_length = 16;
	else
		sess->gen_digest_length = sess->req_digest_length;

	return 0;
}

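/*
 * Worked example for the digest-length handling above: with IMB 0.49 and a
 * requested digest of 10 bytes (not one of 8/12/16), gen_digest_length is
 * forced to 16. finalize() then writes the full 16-byte tag to a temporary
 * buffer, and only the first 10 bytes are copied out (generation) or
 * compared (verification).
 */
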
/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_gcm_session *sess = NULL;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(sym_op->session != NULL))
			sess = (struct aesni_gcm_session *)
					get_sym_session_private_data(
					sym_op->session,
					cryptodev_driver_id);
	} else {
		void *_sess;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data)) {
			/* return the session header to avoid leaking it */
			rte_mempool_put(qp->sess_mp, _sess);
			return NULL;
		}

		sess = (struct aesni_gcm_session *)_sess_private_data;

		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
				sess, sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(sym_op->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}

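/*
 * The session-less path above draws two objects: a session header from
 * qp->sess_mp and driver-private data from qp->sess_mp_priv. Both are
 * returned to their pools when the op completes, in
 * handle_completed_gcm_crypto_op().
 */
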
/**
 * Process a crypto operation, calling
 * the GCM API from the multi buffer library.
 *
 * @param	qp		queue pair
 * @param	op		symmetric crypto operation
 * @param	session		GCM session
 *
 * @return
 * - 0 on success
 */
static int
process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t offset, data_offset, data_length;
	uint32_t part_len, total_len, data_len;
	uint8_t *tag;
	unsigned int oop = 0;

	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
		offset = sym_op->aead.data.offset;
		data_offset = offset;
		data_length = sym_op->aead.data.length;
	} else {
		offset = sym_op->auth.data.offset;
		data_offset = offset;
		data_length = sym_op->auth.data.length;
	}

	RTE_ASSERT(m_src != NULL);

	/* Skip the mbuf segments that precede the data offset */
	while (offset >= m_src->data_len && data_length != 0) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	data_len = m_src->data_len - offset;
	part_len = (data_len < data_length) ? data_len :
			data_length;

	RTE_ASSERT((sym_op->m_dst == NULL) ||
			((sym_op->m_dst != NULL) &&
				rte_pktmbuf_is_contiguous(sym_op->m_dst)));

	/* In-place */
	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
		dst = src;
	/* Out-of-place */
	else {
		oop = 1;
		/* Segmented destination buffer is not supported if operation is
		 * Out-of-place
		 */
		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
				data_offset);
	}

	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);

	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
		qp->ops[session->key].init(&session->gdata_key, &qp->gdata_ctx,
				iv_ptr, sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		qp->ops[session->key].update_enc(&session->gdata_key,
				&qp->gdata_ctx, dst, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			qp->ops[session->key].update_enc(&session->gdata_key,
					&qp->gdata_ctx, dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		if (session->req_digest_length != session->gen_digest_length)
			tag = qp->temp_digest;
		else
			tag = sym_op->aead.digest.data;

		qp->ops[session->key].finalize_enc(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
		qp->ops[session->key].init(&session->gdata_key, &qp->gdata_ctx,
				iv_ptr, sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		qp->ops[session->key].update_dec(&session->gdata_key,
				&qp->gdata_ctx, dst, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			qp->ops[session->key].update_dec(&session->gdata_key,
					&qp->gdata_ctx, dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		tag = qp->temp_digest;
		qp->ops[session->key].finalize_dec(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
		qp->ops[session->key].init(&session->gdata_key, &qp->gdata_ctx,
				iv_ptr, src,
				(uint64_t)data_length);
		if (session->req_digest_length != session->gen_digest_length)
			tag = qp->temp_digest;
		else
			tag = sym_op->auth.digest.data;
		qp->ops[session->key].finalize_enc(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	} else { /* AESNI_GMAC_OP_VERIFY */
		qp->ops[session->key].init(&session->gdata_key, &qp->gdata_ctx,
				iv_ptr, src,
				(uint64_t)data_length);

		/*
		 * Always generate the full 16-byte tag; only the requested
		 * number of bytes is compared later.
		 */
		tag = qp->temp_digest;
		qp->ops[session->key].finalize_enc(&session->gdata_key,
				&qp->gdata_ctx, tag,
				session->gen_digest_length);
	}

	return 0;
}

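/*
 * Note on segmentation in process_gcm_crypto_op() above: a chained source
 * mbuf is handled by feeding each segment to update(); an out-of-place
 * destination must be contiguous (asserted), so dst simply advances by
 * part_len after each segment.
 */
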
static inline void
aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, uint8_t *digest)
{
	if (s->req_digest_length != s->gen_digest_length) {
		uint8_t tmpdigest[s->gen_digest_length];

		s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
				s->gen_digest_length);
		memcpy(digest, tmpdigest, s->req_digest_length);
	} else {
		s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
				s->gen_digest_length);
	}

	return 0;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, uint8_t *digest)
{
	uint8_t tmpdigest[s->gen_digest_length];

	s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
			s->gen_digest_length);

	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
			EBADMSG;
}

static inline void
aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
		void *iv, void *aad)
{
	uint32_t i;

	/* init crypto operation */
	s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
			(uint64_t)s->aad_length);

	/* update with sgl data, in-place */
	for (i = 0; i < sgl->num; i++) {
		struct rte_crypto_vec *vec = &sgl->vec[i];

		s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
				vec->len);
	}
}

static inline void
aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
		void *iv)
{
	s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
			sgl->vec[0].len);
}

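/*
 * GMAC over an SGL only absorbs a single segment: init() is handed
 * sgl->vec[0] directly. The callers below therefore reject requests with
 * sgl[i].num != 1 as ENOTSUP.
 */
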
static inline uint32_t
aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i], vec->aad[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i], vec->aad[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		if (vec->sgl[i].num != 1) {
			vec->status[i] = ENOTSUP;
			continue;
		}

		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		if (vec->sgl[i].num != 1) {
			vec->status[i] = ENOTSUP;
			continue;
		}

		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
				&vec->sgl[i], vec->iv[i]);
		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
				gdata_ctx, vec->digest[i]);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

/** Process CPU crypto bulk operations */
uint32_t
aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
	struct rte_cryptodev_sym_session *sess,
	__rte_unused union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	void *sess_priv;
	struct aesni_gcm_session *s;
	struct gcm_context_data gdata_ctx;

	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
	if (unlikely(sess_priv == NULL)) {
		aesni_gcm_fill_error_code(vec, EINVAL);
		return 0;
	}

	s = sess_priv;

	switch (s->op) {
	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
	case AESNI_GMAC_OP_GENERATE:
		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
	case AESNI_GMAC_OP_VERIFY:
		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
	default:
		aesni_gcm_fill_error_code(vec, EINVAL);
		return 0;
	}
}

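/*
 * Illustrative caller sketch (not part of this driver; the local names are
 * hypothetical). An application reaches the handler above through the
 * synchronous CPU crypto API, roughly as follows, where "data_vec" is a
 * filled-in struct rte_crypto_vec and "sess" an AES-GCM session created on
 * this device:
 *
 *	void *iv = ..., *aad = ..., *digest = ...;
 *	int32_t status;
 *	struct rte_crypto_sgl sgl = { .vec = &data_vec, .num = 1 };
 *	struct rte_crypto_sym_vec vec = {
 *		.sgl = &sgl, .iv = &iv, .aad = &aad,
 *		.digest = &digest, .status = &status, .num = 1,
 *	};
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id,
 *			sess, ofs, &vec);
 *
 * The field layout follows rte_crypto_sym_vec at the time of writing and
 * may differ between DPDK releases.
 */
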
/**
 * Post-process a completed GCM crypto operation: set the op status, verify
 * the digest for decrypt/verify operations and, when the requested digest
 * is shorter than the generated one, copy out the truncated tag.
 *
 * @param	qp		queue pair
 * @param	op		crypto operation
 * @param	session		GCM session
 */
static void
post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Verify digest if required */
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
			session->op == AESNI_GMAC_OP_VERIFY) {
		uint8_t *digest;
		uint8_t *tag = qp->temp_digest;

		if (session->op == AESNI_GMAC_OP_VERIFY)
			digest = op->sym->auth.digest.data;
		else
			digest = op->sym->aead.digest.data;

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
		rte_hexdump(stdout, "auth tag (orig):",
				digest, session->req_digest_length);
		rte_hexdump(stdout, "auth tag (calc):",
				tag, session->req_digest_length);
#endif

		if (memcmp(tag, digest, session->req_digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		if (session->req_digest_length != session->gen_digest_length) {
			if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
				memcpy(op->sym->aead.digest.data, qp->temp_digest,
						session->req_digest_length);
			else
				memcpy(op->sym->auth.digest.data, qp->temp_digest,
						session->req_digest_length);
		}
	}
}

/**
 * Finalize a completed GCM request: post-process the result and, for a
 * session-less operation, return the session objects to their mempools.
 *
 * @param	qp	queue pair
 * @param	op	crypto operation
 * @param	sess	GCM session
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op,
		struct aesni_gcm_session *sess)
{
	post_process_gcm_crypto_op(qp, op, sess);

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_gcm_session));
		memset(op->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
				op->sym->session));
		rte_mempool_put(qp->sess_mp_priv, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}
}

static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_session *sess;
	struct aesni_gcm_qp *qp = queue_pair;

	int retval = 0;
	unsigned int i, nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		sess = aesni_gcm_get_session(qp, ops[i]);
		if (unlikely(sess == NULL)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		retval = process_gcm_crypto_op(qp, ops[i], sess);
		if (retval < 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		handle_completed_gcm_crypto_op(qp, ops[i], sess);
	}

	qp->qp_stats.dequeued_count += i;

	return i;
}

static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_qp *qp = queue_pair;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

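/*
 * Design note: enqueue_burst only stages ops on the queue pair ring; all
 * GCM processing happens in dequeue_burst, on the lcore that polls for
 * completions. Despite its name, qp->processed_pkts therefore holds ops
 * that are still pending at enqueue time.
 */
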
static int aesni_gcm_remove(struct rte_vdev_device *vdev);

static int
aesni_gcm_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_gcm_private *internals;
	enum aesni_gcm_vector_mode vector_mode;
	MB_MGR *mb_mgr;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		AESNI_GCM_LOG(ERR, "driver %s: create failed",
			init_params->name);
		return -ENODEV;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_GCM_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_GCM_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_GCM_AVX;
	else
		vector_mode = RTE_AESNI_GCM_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_gcm_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO;

	/* Check CPU for support for AES instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
	else
		AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");

	mb_mgr = alloc_mb_mgr(0);
	if (mb_mgr == NULL)
		goto error_exit;

	switch (vector_mode) {
	case RTE_AESNI_GCM_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		init_mb_mgr_sse(mb_mgr);
		break;
	case RTE_AESNI_GCM_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		init_mb_mgr_avx(mb_mgr);
		break;
	case RTE_AESNI_GCM_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		init_mb_mgr_avx2(mb_mgr);
		break;
	case RTE_AESNI_GCM_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		init_mb_mgr_avx512(mb_mgr);
		break;
	default:
		AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
		goto error_exit;
	}

	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->mb_mgr = mb_mgr;

	/* Set arch independent function pointers, based on key size */
	internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
	internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
	internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
	internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
	internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;

	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
	internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
	internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
	internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;

	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
	internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
	internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
	internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;

#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());
#else
	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
#endif

	return 0;

error_exit:
	if (mb_mgr)
		free_mb_mgr(mb_mgr);

	rte_cryptodev_pmd_destroy(dev);

	return -1;
}

static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct aesni_gcm_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);

	return aesni_gcm_create(name, vdev, &init_params);
}

static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	struct aesni_gcm_private *internals;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = aesni_gcm_remove
};

static struct cryptodev_driver aesni_gcm_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
		cryptodev_driver_id);

RTE_INIT(aesni_gcm_init_log)
{
	aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
}

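/*
 * Illustrative usage (assuming the usual vdev conventions; the instance
 * name and arguments are examples, not verified against every DPDK
 * release): the device can be created from the EAL command line,
 *
 *	--vdev "crypto_aesni_gcm0,max_nb_queue_pairs=2,socket_id=0"
 *
 * or programmatically:
 *
 *	rte_vdev_init("crypto_aesni_gcm0",
 *			"max_nb_queue_pairs=2,socket_id=0");
 */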