1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2021 Intel Corporation
5 #include "pmd_aesni_gcm_priv.h"
/*
 * Populate the per-key-size GCM/GMAC function pointer table from the
 * intel-ipsec-mb manager. One table entry exists per key size
 * (GCM_KEY_128/192/256) so the datapath can dispatch by
 * session->key_length with a single index instead of branching on key
 * size for every call.
 */
8 aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)
10 /* Set 128 bit function pointers. */
11 ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
12 ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
14 ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
15 ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
16 ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
18 ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
19 ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
20 ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
22 ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
23 ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
24 ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
26 /* Set 192 bit function pointers. */
27 ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
28 ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
30 ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
31 ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
32 ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
34 ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
35 ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
36 ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
38 ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
39 ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
40 ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
42 /* Set 256 bit function pointers. */
43 ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
44 ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
46 ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
47 ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
48 ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
50 ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
51 ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
52 ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
54 ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
55 ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
56 ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
/*
 * Parse a crypto transform chain and fill in an AESNI-GCM session.
 *
 * Supports two modes (via ipsec_mb_parse_xform): AES-GMAC
 * authentication-only (HASH_GEN/VERIFY) and AES-GCM combined AEAD
 * (encrypt/decrypt). Stores IV offset/length, AAD length and the
 * requested digest length in the session, then pre-computes the GCM
 * key schedule (gdata_key) for the given key size.
 */
60 aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
61 const struct rte_crypto_sym_xform *xform)
63 struct aesni_gcm_session *sess = session;
64 const struct rte_crypto_sym_xform *auth_xform;
65 const struct rte_crypto_sym_xform *cipher_xform;
66 const struct rte_crypto_sym_xform *aead_xform;
70 enum ipsec_mb_operation mode;
73 ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
74 &cipher_xform, &aead_xform);
/* Authentication-only path: only AES-GMAC is accepted. */
83 case IPSEC_MB_OP_HASH_GEN_ONLY:
84 case IPSEC_MB_OP_HASH_VERIFY_ONLY:
88 if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
90 "Only AES GMAC is supported as an authentication only algorithm");
94 /* Set IV parameters */
95 sess->iv.offset = auth_xform->auth.iv.offset;
96 sess->iv.length = auth_xform->auth.iv.length;
97 key_length = auth_xform->auth.key.length;
98 key = auth_xform->auth.key.data;
/* Clamp the requested digest length (RTE_MIN with an upper bound
 * elided from this view — presumably the 16-byte GCM tag maximum;
 * TODO confirm against the full source). */
99 sess->req_digest_length =
100 RTE_MIN(auth_xform->auth.digest_length,
/* AEAD path: only AES-GCM is accepted. */
103 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
104 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
106 * aead_xform = xform;
109 if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
111 "The only combined operation supported is AES GCM");
115 /* Set IV parameters */
116 sess->iv.offset = aead_xform->aead.iv.offset;
117 sess->iv.length = aead_xform->aead.iv.length;
118 key_length = aead_xform->aead.key.length;
119 key = aead_xform->aead.key.data;
120 sess->aad_length = aead_xform->aead.aad_length;
121 sess->req_digest_length =
122 RTE_MIN(aead_xform->aead.digest_length,
127 ERR, "Wrong xform type, has to be AEAD or authentication");
132 /* Check key length, and calculate GCM pre-compute. */
133 switch (key_length) {
135 sess->key_length = GCM_KEY_128;
136 mb_mgr->gcm128_pre(key, &sess->gdata_key);
139 sess->key_length = GCM_KEY_192;
140 mb_mgr->gcm192_pre(key, &sess->gdata_key);
143 sess->key_length = GCM_KEY_256;
144 mb_mgr->gcm256_pre(key, &sess->gdata_key);
147 IPSEC_MB_LOG(ERR, "Invalid key length");
/* A GCM/GMAC tag can never exceed 16 bytes. */
153 if (sess->req_digest_length > 16) {
154 IPSEC_MB_LOG(ERR, "Invalid digest length");
159 * If size requested is different, generate the full digest
160 * (16 bytes) in a temporary location and then memcpy
161 * the requested number of bytes.
163 if (sess->req_digest_length < 4)
164 sess->gen_digest_length = 16;
166 sess->gen_digest_length = sess->req_digest_length;
173 * Process a completed job and return rte_mbuf which job processed
175 * @param job IMB_JOB job to process
178 * - Returns processed mbuf which is trimmed of output digest used in
179 * verification of supplied digest in the case of a HASH_CIPHER operation
180 * - Returns NULL on invalid job
183 post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
184 struct rte_crypto_op *op,
185 struct aesni_gcm_session *session)
187 struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
189 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
190 /* Verify digest if required */
191 if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
192 session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
/* The tag computed by the library was written into the QP's
 * scratch buffer (temp_digest) by process_gcm_crypto_op(). */
195 uint8_t *tag = qp_data->temp_digest;
197 if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
198 digest = op->sym->auth.digest.data;
200 digest = op->sym->aead.digest.data;
202 #ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
203 rte_hexdump(stdout, "auth tag (orig):",
204 digest, session->req_digest_length);
205 rte_hexdump(stdout, "auth tag (calc):",
206 tag, session->req_digest_length);
/* Compare only the requested number of tag bytes.
 * NOTE(review): memcmp is not constant-time; acceptable here only
 * if tag comparison timing is not considered sensitive. */
209 if (memcmp(tag, digest, session->req_digest_length) != 0)
210 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Generation path: if the full 16-byte tag was produced in the
 * scratch buffer, copy only the requested prefix to the user's
 * digest location. */
212 if (session->req_digest_length != session->gen_digest_length) {
214 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
215 memcpy(op->sym->aead.digest.data,
216 qp_data->temp_digest,
217 session->req_digest_length);
219 memcpy(op->sym->auth.digest.data,
220 qp_data->temp_digest,
221 session->req_digest_length);
227 * Process a completed GCM request
229 * @param qp Queue Pair to process
230 * @param op Crypto operation
231 * @param sess AESNI-GCM session
235 handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
236 struct rte_crypto_op *op,
237 struct aesni_gcm_session *sess)
239 post_process_gcm_crypto_op(qp, op, sess);
241 /* Free session if a session-less crypto op */
242 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
/* Wipe the temporary session (it held key material) before
 * returning both the private data and the session header to
 * their mempools. */
243 memset(sess, 0, sizeof(struct aesni_gcm_session));
244 memset(op->sym->session, 0,
245 rte_cryptodev_sym_get_existing_header_session_size(
247 rte_mempool_put(qp->sess_mp_priv, sess);
248 rte_mempool_put(qp->sess_mp, op->sym->session);
249 op->sym->session = NULL;
254 * Process a crypto operation, calling
255 * the GCM API from the multi buffer library.
257 * @param qp queue pair
258 * @param op symmetric crypto operation
259 * @param session GCM session
 *
 * Walks a possibly segmented (multi-mbuf) source buffer, feeding each
 * contiguous chunk to the library's init/update/finalize sequence for
 * the session's operation (AEAD enc/dec or GMAC gen/verify).
265 process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
266 struct aesni_gcm_session *session)
268 struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
271 struct rte_crypto_sym_op *sym_op = op->sym;
272 struct rte_mbuf *m_src = sym_op->m_src;
273 uint32_t offset, data_offset, data_length;
274 uint32_t part_len, total_len, data_len;
276 unsigned int oop = 0;
/* Dispatch table selected once per op by key size. */
277 struct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];
279 if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
280 session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
281 offset = sym_op->aead.data.offset;
282 data_offset = offset;
283 data_length = sym_op->aead.data.length;
285 offset = sym_op->auth.data.offset;
286 data_offset = offset;
287 data_length = sym_op->auth.data.length;
290 RTE_ASSERT(m_src != NULL);
/* Skip over leading mbuf segments until 'offset' lands inside
 * the current segment. */
292 while (offset >= m_src->data_len && data_length != 0) {
293 offset -= m_src->data_len;
296 RTE_ASSERT(m_src != NULL);
299 src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
/* First chunk: what remains of this segment, capped at the
 * total data length. */
301 data_len = m_src->data_len - offset;
302 part_len = (data_len < data_length) ? data_len :
305 RTE_ASSERT((sym_op->m_dst == NULL) ||
306 ((sym_op->m_dst != NULL) &&
307 rte_pktmbuf_is_contiguous(sym_op->m_dst)));
/* In-place when no distinct destination mbuf is given. */
310 if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
315 /* Segmented destination buffer is not supported
316 * if operation is Out-of-place
318 RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
319 dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
323 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
326 if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
327 ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
328 sym_op->aead.aad.data,
329 (uint64_t)session->aad_length);
331 ops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
332 dst, src, (uint64_t)part_len);
333 total_len = data_length - part_len;
/* Continue over remaining mbuf segments until all payload
 * bytes have been fed to the library. */
338 RTE_ASSERT(m_src != NULL);
340 src = rte_pktmbuf_mtod(m_src, uint8_t *);
345 part_len = (m_src->data_len < total_len) ?
346 m_src->data_len : total_len;
348 ops->update_enc(&session->gdata_key,
349 &qp_data->gcm_ctx_data,
350 dst, src, (uint64_t)part_len);
351 total_len -= part_len;
/* If a truncated tag was requested, generate the full tag into
 * the QP scratch buffer; post-processing copies the prefix out. */
354 if (session->req_digest_length != session->gen_digest_length)
355 tag = qp_data->temp_digest;
357 tag = sym_op->aead.digest.data;
359 ops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
360 tag, session->gen_digest_length);
361 } else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
362 ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
363 sym_op->aead.aad.data,
364 (uint64_t)session->aad_length);
366 ops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
367 dst, src, (uint64_t)part_len);
368 total_len = data_length - part_len;
373 RTE_ASSERT(m_src != NULL);
375 src = rte_pktmbuf_mtod(m_src, uint8_t *);
380 part_len = (m_src->data_len < total_len) ?
381 m_src->data_len : total_len;
383 ops->update_dec(&session->gdata_key,
384 &qp_data->gcm_ctx_data,
385 dst, src, (uint64_t)part_len);
386 total_len -= part_len;
/* Decrypt always computes the tag into scratch; the comparison
 * against the supplied digest happens in post-processing. */
389 tag = qp_data->temp_digest;
390 ops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
391 tag, session->gen_digest_length);
392 } else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
393 ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
394 iv_ptr, session->iv.length);
396 ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
397 src, (uint64_t)part_len);
398 total_len = data_length - part_len;
403 RTE_ASSERT(m_src != NULL);
405 src = rte_pktmbuf_mtod(m_src, uint8_t *);
406 part_len = (m_src->data_len < total_len) ?
407 m_src->data_len : total_len;
409 ops->gmac_update(&session->gdata_key,
410 &qp_data->gcm_ctx_data, src,
412 total_len -= part_len;
415 if (session->req_digest_length != session->gen_digest_length)
416 tag = qp_data->temp_digest;
418 tag = sym_op->auth.digest.data;
420 ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
421 tag, session->gen_digest_length);
422 } else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
423 ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
424 iv_ptr, session->iv.length);
426 ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
427 src, (uint64_t)part_len);
428 total_len = data_length - part_len;
433 RTE_ASSERT(m_src != NULL);
435 src = rte_pktmbuf_mtod(m_src, uint8_t *);
436 part_len = (m_src->data_len < total_len) ?
437 m_src->data_len : total_len;
439 ops->gmac_update(&session->gdata_key,
440 &qp_data->gcm_ctx_data, src,
442 total_len -= part_len;
445 tag = qp_data->temp_digest;
447 ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
448 tag, session->gen_digest_length);
453 /** Get gcm session
 *
 * For WITH_SESSION ops, return the private session data attached to the
 * op's session. For SESSIONLESS ops, allocate a session object and its
 * private data from the QP's mempools, configure it from the op's
 * transform chain, and attach it to the op (freed again by
 * handle_completed_gcm_crypto_op()). Returns NULL on any failure and
 * marks the op INVALID_SESSION.
 */
454 static inline struct aesni_gcm_session *
455 aesni_gcm_get_session(struct ipsec_mb_qp *qp,
456 struct rte_crypto_op *op)
458 struct aesni_gcm_session *sess = NULL;
460 ipsec_mb_get_driver_id(IPSEC_MB_PMD_TYPE_AESNI_GCM);
461 struct rte_crypto_sym_op *sym_op = op->sym;
463 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
464 if (likely(sym_op->session != NULL))
465 sess = (struct aesni_gcm_session *)
466 get_sym_session_private_data(sym_op->session,
470 void *_sess_private_data = NULL;
472 if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
475 if (rte_mempool_get(qp->sess_mp_priv,
476 (void **)&_sess_private_data))
479 sess = (struct aesni_gcm_session *)_sess_private_data;
/* On configure failure both objects go back to their pools
 * so nothing leaks. */
481 if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
482 _sess_private_data, sym_op->xform) != 0)) {
483 rte_mempool_put(qp->sess_mp, _sess);
484 rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
487 sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
488 set_sym_session_private_data(sym_op->session, driver_id,
492 if (unlikely(sess == NULL))
493 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
/*
 * Dequeue burst: pull ops from the QP ingress ring, resolve each op's
 * session, run the GCM/GMAC processing, then post-process (digest
 * verify/copy, sessionless cleanup). Errors bump the QP's
 * dequeue_err_count and mark the op INVALID_ARGS.
 */
499 aesni_gcm_pmd_dequeue_burst(void *queue_pair,
500 struct rte_crypto_op **ops, uint16_t nb_ops)
502 struct aesni_gcm_session *sess;
503 struct ipsec_mb_qp *qp = queue_pair;
506 unsigned int i, nb_dequeued;
508 nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
509 (void **)ops, nb_ops, NULL);
511 for (i = 0; i < nb_dequeued; i++) {
513 sess = aesni_gcm_get_session(qp, ops[i]);
514 if (unlikely(sess == NULL)) {
515 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
516 qp->stats.dequeue_err_count++;
520 retval = process_gcm_crypto_op(qp, ops[i], sess);
522 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
523 qp->stats.dequeue_err_count++;
527 handle_completed_gcm_crypto_op(qp, ops[i], sess);
530 qp->stats.dequeued_count += i;
/* Set every per-element status in a CPU-crypto vector to 'errnum'. */
536 aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
541 for (i = 0; i < vec->num; i++)
542 vec->status[i] = errnum;
545 static inline int32_t
/*
 * Finalize an SGL encryption: when the requested tag length differs
 * from the generated one, produce the full tag in a stack temporary
 * (VLA sized by gen_digest_length) and copy out only the requested
 * prefix; otherwise write the tag straight to 'digest'.
 */
546 aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
547 struct gcm_context_data *gdata_ctx,
548 uint8_t *digest, struct aesni_gcm_ops ops)
550 if (s->req_digest_length != s->gen_digest_length) {
551 uint8_t tmpdigest[s->gen_digest_length];
553 ops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,
554 s->gen_digest_length);
555 memcpy(digest, tmpdigest, s->req_digest_length);
557 ops.finalize_enc(&s->gdata_key, gdata_ctx, digest,
558 s->gen_digest_length);
564 static inline int32_t
/*
 * Finalize an SGL decryption: compute the tag into a stack temporary
 * and compare the requested number of bytes against the supplied
 * digest. Returns 0 on match, non-zero otherwise (the error value is
 * elided from this view — presumably EBADMSG; TODO confirm).
 * NOTE(review): memcmp is not constant-time.
 */
565 aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
566 struct gcm_context_data *gdata_ctx,
567 uint8_t *digest, struct aesni_gcm_ops ops)
569 uint8_t tmpdigest[s->gen_digest_length];
571 ops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,
572 s->gen_digest_length);
574 return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
/*
 * Run GCM init + per-segment update over an SGL. Encryption/decryption
 * is done in place (dst == src == vec->base). The op direction comes
 * from the session; anything else logs an error.
 */
579 aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
580 struct gcm_context_data *gdata_ctx,
581 struct rte_crypto_sgl *sgl, void *iv, void *aad,
582 struct aesni_gcm_ops ops)
586 /* init crypto operation */
587 ops.init(&s->gdata_key, gdata_ctx, iv, aad,
588 (uint64_t)s->aad_length);
590 /* update with sgl data */
591 for (i = 0; i < sgl->num; i++) {
592 struct rte_crypto_vec *vec = &sgl->vec[i];
595 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
596 ops.update_enc(&s->gdata_key, gdata_ctx,
597 vec->base, vec->base, vec->len);
599 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
600 ops.update_dec(&s->gdata_key, gdata_ctx,
601 vec->base, vec->base, vec->len);
604 IPSEC_MB_LOG(ERR, "Invalid session op");
/*
 * GMAC over a single-segment SGL: init consumes sgl->vec[0].base as the
 * data to authenticate. Callers (aesni_gmac_sgl_generate/verify) reject
 * multi-segment SGLs before calling this.
 */
612 aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
613 struct gcm_context_data *gdata_ctx,
614 struct rte_crypto_sgl *sgl, void *iv,
615 struct aesni_gcm_ops ops)
617 ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
621 static inline uint32_t
/*
 * CPU-crypto bulk AEAD encrypt: process each SGL in the vector and
 * finalize its tag. Per-element status is 0 on success; returns the
 * number of successfully processed elements.
 */
622 aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
623 struct gcm_context_data *gdata_ctx,
624 struct rte_crypto_sym_vec *vec,
625 struct aesni_gcm_ops ops)
627 uint32_t i, processed;
630 for (i = 0; i < vec->num; ++i) {
631 aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
632 vec->iv[i].va, vec->aad[i].va,
634 vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
635 s, gdata_ctx, vec->digest[i].va, ops);
636 processed += (vec->status[i] == 0);
642 static inline uint32_t
/*
 * CPU-crypto bulk AEAD decrypt: mirror of aesni_gcm_sgl_encrypt, but
 * finalize verifies the supplied tag. Returns the number of elements
 * whose status is 0 (tag matched).
 */
643 aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
644 struct gcm_context_data *gdata_ctx,
645 struct rte_crypto_sym_vec *vec,
646 struct aesni_gcm_ops ops)
648 uint32_t i, processed;
651 for (i = 0; i < vec->num; ++i) {
652 aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
653 vec->iv[i].va, vec->aad[i].va,
655 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
656 s, gdata_ctx, vec->digest[i].va, ops);
657 processed += (vec->status[i] == 0);
663 static inline uint32_t
/*
 * CPU-crypto bulk GMAC generation. Multi-segment inputs are not
 * supported (status ENOTSUP). Returns the count of successful elements.
 */
664 aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
665 struct gcm_context_data *gdata_ctx,
666 struct rte_crypto_sym_vec *vec,
667 struct aesni_gcm_ops ops)
669 uint32_t i, processed;
672 for (i = 0; i < vec->num; ++i) {
673 if (vec->src_sgl[i].num != 1) {
674 vec->status[i] = ENOTSUP;
678 aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
680 vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
681 s, gdata_ctx, vec->digest[i].va, ops);
682 processed += (vec->status[i] == 0);
688 static inline uint32_t
/*
 * CPU-crypto bulk GMAC verification. Multi-segment inputs are not
 * supported (status ENOTSUP); finalize compares the computed tag with
 * the supplied digest. Returns the count of successful elements.
 */
689 aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
690 struct gcm_context_data *gdata_ctx,
691 struct rte_crypto_sym_vec *vec,
692 struct aesni_gcm_ops ops)
694 uint32_t i, processed;
697 for (i = 0; i < vec->num; ++i) {
698 if (vec->src_sgl[i].num != 1) {
699 vec->status[i] = ENOTSUP;
703 aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
705 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
706 s, gdata_ctx, vec->digest[i].va, ops);
707 processed += (vec->status[i] == 0);
713 /** Process CPU crypto bulk operations
 *
 * Entry point for RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO: resolve the session,
 * lazily initialize the per-lcore GCM ops table for this key size, then
 * dispatch to the matching bulk SGL handler. On any setup failure, all
 * element statuses are set to an errno and 0 is returned.
 */
715 aesni_gcm_process_bulk(struct rte_cryptodev *dev,
716 struct rte_cryptodev_sym_session *sess,
717 __rte_unused union rte_crypto_sym_ofs ofs,
718 struct rte_crypto_sym_vec *vec)
720 struct aesni_gcm_session *s;
/* Per-call GCM context lives on the stack — no QP state is used. */
721 struct gcm_context_data gdata_ctx;
724 s = (struct aesni_gcm_session *) get_sym_session_private_data(sess,
726 if (unlikely(s == NULL)) {
727 aesni_gcm_fill_error_code(vec, EINVAL);
731 /* get per-thread MB MGR, create one if needed */
732 mb_mgr = get_per_thread_mb_mgr();
733 if (unlikely(mb_mgr == NULL))
736 /* Check if function pointers have been set for this thread ops. */
737 if (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))
738 aesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);
741 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
742 return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,
743 RTE_PER_LCORE(gcm_ops)[s->key_length]);
744 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
745 return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,
746 RTE_PER_LCORE(gcm_ops)[s->key_length]);
747 case IPSEC_MB_OP_HASH_GEN_ONLY:
748 return aesni_gmac_sgl_generate(s, &gdata_ctx, vec,
749 RTE_PER_LCORE(gcm_ops)[s->key_length]);
750 case IPSEC_MB_OP_HASH_VERIFY_ONLY:
751 return aesni_gmac_sgl_verify(s, &gdata_ctx, vec,
752 RTE_PER_LCORE(gcm_ops)[s->key_length]);
754 aesni_gcm_fill_error_code(vec, EINVAL);
/*
 * Queue-pair setup: delegate to the common ipsec_mb implementation,
 * then populate this QP's private GCM ops table from its MB manager.
 */
760 aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
761 const struct rte_cryptodev_qp_conf *qp_conf,
764 int ret = ipsec_mb_qp_setup(dev, qp_id, qp_conf, socket_id);
768 struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
769 struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
770 aesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);
/*
 * Cryptodev operations table: mostly shared ipsec_mb implementations,
 * with AESNI-GCM-specific queue-pair setup (installs the GCM ops table)
 * and CPU-crypto bulk processing.
 */
774 struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
775 .dev_configure = ipsec_mb_config,
776 .dev_start = ipsec_mb_start,
777 .dev_stop = ipsec_mb_stop,
778 .dev_close = ipsec_mb_close,
780 .stats_get = ipsec_mb_stats_get,
781 .stats_reset = ipsec_mb_stats_reset,
783 .dev_infos_get = ipsec_mb_info_get,
785 .queue_pair_setup = aesni_gcm_qp_setup,
786 .queue_pair_release = ipsec_mb_qp_release,
788 .sym_cpu_process = aesni_gcm_process_bulk,
790 .sym_session_get_size = ipsec_mb_sym_session_get_size,
791 .sym_session_configure = ipsec_mb_sym_session_configure,
792 .sym_session_clear = ipsec_mb_sym_session_clear
/* vdev probe callback: create the AESNI-GCM PMD instance. */
796 aesni_gcm_probe(struct rte_vdev_device *vdev)
798 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_GCM);
/* Virtual-device driver registration plus the legacy alias, accepted
 * vdev parameters, and the crypto driver ID registration. */
801 static struct rte_vdev_driver cryptodev_aesni_gcm_pmd_drv = {
802 .probe = aesni_gcm_probe,
803 .remove = ipsec_mb_remove
806 static struct cryptodev_driver aesni_gcm_crypto_drv;
808 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD,
809 cryptodev_aesni_gcm_pmd_drv);
810 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
811 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
812 "max_nb_queue_pairs=<int> socket_id=<int>");
813 RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv,
814 cryptodev_aesni_gcm_pmd_drv.driver,
815 pmd_driver_id_aesni_gcm);
817 /* Constructor function to register aesni-gcm PMD */
818 RTE_INIT(ipsec_mb_register_aesni_gcm)
820 struct ipsec_mb_internals *aesni_gcm_data =
821 &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];
/* Fill in the framework's descriptor: capabilities, datapath entry
 * points, feature flags, and the sizes the framework must allocate
 * for internals, QP private data and sessions. */
823 aesni_gcm_data->caps = aesni_gcm_capabilities;
824 aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
825 aesni_gcm_data->feature_flags =
826 RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
827 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
828 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
829 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
830 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
831 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
832 RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
833 aesni_gcm_data->internals_priv_size = 0;
834 aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
835 aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
836 aesni_gcm_data->queue_pair_configure = NULL;
837 aesni_gcm_data->session_configure = aesni_gcm_session_configure;
838 aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);