crypto/ipsec_mb: fix GCM requested digest length
[dpdk.git] / drivers / crypto / ipsec_mb / pmd_aesni_gcm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2021 Intel Corporation
3  */
4
5 #include "pmd_aesni_gcm_priv.h"
6
7 static void
8 aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)
9 {
10         /* Set 128 bit function pointers. */
11         ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
12         ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
13
14         ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
15         ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
16         ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
17
18         ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
19         ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
20         ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
21
22         ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
23         ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
24         ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
25
26         /* Set 192 bit function pointers. */
27         ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
28         ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
29
30         ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
31         ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
32         ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
33
34         ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
35         ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
36         ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
37
38         ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
39         ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
40         ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
41
42         /* Set 256 bit function pointers. */
43         ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
44         ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
45
46         ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
47         ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
48         ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
49
50         ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
51         ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
52         ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
53
54         ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
55         ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
56         ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
57 }
58
59 static int
60 aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
61                             const struct rte_crypto_sym_xform *xform)
62 {
63         struct aesni_gcm_session *sess = session;
64         const struct rte_crypto_sym_xform *auth_xform;
65         const struct rte_crypto_sym_xform *cipher_xform;
66         const struct rte_crypto_sym_xform *aead_xform;
67
68         uint8_t key_length;
69         const uint8_t *key;
70         enum ipsec_mb_operation mode;
71         int ret = 0;
72
73         ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
74                                 &cipher_xform, &aead_xform);
75         if (ret)
76                 return ret;
77
78         /**< GCM key type */
79
80         sess->op = mode;
81
82         switch (sess->op) {
83         case IPSEC_MB_OP_HASH_GEN_ONLY:
84         case IPSEC_MB_OP_HASH_VERIFY_ONLY:
85                 /* AES-GMAC
86                  * auth_xform = xform;
87                  */
88                 if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
89                         IPSEC_MB_LOG(ERR,
90         "Only AES GMAC is supported as an authentication only algorithm");
91                         ret = -ENOTSUP;
92                         goto error_exit;
93                 }
94                 /* Set IV parameters */
95                 sess->iv.offset = auth_xform->auth.iv.offset;
96                 sess->iv.length = auth_xform->auth.iv.length;
97                 key_length = auth_xform->auth.key.length;
98                 key = auth_xform->auth.key.data;
99                 sess->req_digest_length =
100                     RTE_MIN(auth_xform->auth.digest_length,
101                                 DIGEST_LENGTH_MAX);
102                 break;
103         case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
104         case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
105                 /* AES-GCM
106                  * aead_xform = xform;
107                  */
108
109                 if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
110                         IPSEC_MB_LOG(ERR,
111                         "The only combined operation supported is AES GCM");
112                         ret = -ENOTSUP;
113                         goto error_exit;
114                 }
115                 /* Set IV parameters */
116                 sess->iv.offset = aead_xform->aead.iv.offset;
117                 sess->iv.length = aead_xform->aead.iv.length;
118                 key_length = aead_xform->aead.key.length;
119                 key = aead_xform->aead.key.data;
120                 sess->aad_length = aead_xform->aead.aad_length;
121                 sess->req_digest_length =
122                         RTE_MIN(aead_xform->aead.digest_length,
123                                 DIGEST_LENGTH_MAX);
124                 break;
125         default:
126                 IPSEC_MB_LOG(
127                     ERR, "Wrong xform type, has to be AEAD or authentication");
128                 ret = -ENOTSUP;
129                 goto error_exit;
130         }
131
132         /* Check key length, and calculate GCM pre-compute. */
133         switch (key_length) {
134         case 16:
135                 sess->key_length = GCM_KEY_128;
136                 mb_mgr->gcm128_pre(key, &sess->gdata_key);
137                 break;
138         case 24:
139                 sess->key_length = GCM_KEY_192;
140                 mb_mgr->gcm192_pre(key, &sess->gdata_key);
141                 break;
142         case 32:
143                 sess->key_length = GCM_KEY_256;
144                 mb_mgr->gcm256_pre(key, &sess->gdata_key);
145                 break;
146         default:
147                 IPSEC_MB_LOG(ERR, "Invalid key length");
148                 ret = -EINVAL;
149                 goto error_exit;
150         }
151
152         /* Digest check */
153         if (sess->req_digest_length > 16) {
154                 IPSEC_MB_LOG(ERR, "Invalid digest length");
155                 ret = -EINVAL;
156                 goto error_exit;
157         }
158         /*
159          * If size requested is different, generate the full digest
160          * (16 bytes) in a temporary location and then memcpy
161          * the requested number of bytes.
162          */
163         if (sess->req_digest_length < 4)
164                 sess->gen_digest_length = 16;
165         else
166                 sess->gen_digest_length = sess->req_digest_length;
167
168 error_exit:
169         return ret;
170 }
171
172 /**
173  * Process a completed job and return rte_mbuf which job processed
174  *
175  * @param job   IMB_JOB job to process
176  *
177  * @return
178  * - Returns processed mbuf which is trimmed of output digest used in
179  * verification of supplied digest in the case of a HASH_CIPHER operation
180  * - Returns NULL on invalid job
181  */
182 static void
183 post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
184                 struct rte_crypto_op *op,
185                 struct aesni_gcm_session *session)
186 {
187         struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
188
189         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
190         /* Verify digest if required */
191         if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
192                         session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
193                 uint8_t *digest;
194
195                 uint8_t *tag = qp_data->temp_digest;
196
197                 if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
198                         digest = op->sym->auth.digest.data;
199                 else
200                         digest = op->sym->aead.digest.data;
201
202 #ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
203                 rte_hexdump(stdout, "auth tag (orig):",
204                                 digest, session->req_digest_length);
205                 rte_hexdump(stdout, "auth tag (calc):",
206                                 tag, session->req_digest_length);
207 #endif
208
209                 if (memcmp(tag, digest, session->req_digest_length) != 0)
210                         op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
211         } else {
212                 if (session->req_digest_length != session->gen_digest_length) {
213                         if (session->op ==
214                                 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
215                                 memcpy(op->sym->aead.digest.data,
216                                         qp_data->temp_digest,
217                                         session->req_digest_length);
218                         else
219                                 memcpy(op->sym->auth.digest.data,
220                                         qp_data->temp_digest,
221                                         session->req_digest_length);
222                 }
223         }
224 }
225
226 /**
227  * Process a completed GCM request
228  *
229  * @param qp            Queue Pair to process
230  * @param op            Crypto operation
231  * @param sess          AESNI-GCM session
232  *
233  */
234 static void
235 handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
236                 struct rte_crypto_op *op,
237                 struct aesni_gcm_session *sess)
238 {
239         post_process_gcm_crypto_op(qp, op, sess);
240
241         /* Free session if a session-less crypto op */
242         if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
243                 memset(sess, 0, sizeof(struct aesni_gcm_session));
244                 memset(op->sym->session, 0,
245                         rte_cryptodev_sym_get_existing_header_session_size(
246                                 op->sym->session));
247                 rte_mempool_put(qp->sess_mp_priv, sess);
248                 rte_mempool_put(qp->sess_mp, op->sym->session);
249                 op->sym->session = NULL;
250         }
251 }
252
/**
 * Process one crypto operation through the multi-buffer GCM/GMAC API.
 *
 * Walks the (possibly segmented) source mbuf chain, feeding each segment
 * to the incremental update function for the session's mode, then
 * finalizes the digest.  Out-of-place destinations must be contiguous.
 *
 * @param qp		queue pair (provides per-qp ops table and scratch
 *			digest/context storage)
 * @param op		symmetric crypto operation
 * @param session	GCM session the op was enqueued with
 *
 * @return
 *  0 on success
 */
static int
process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t offset, data_offset, data_length;
	uint32_t part_len, total_len, data_len;
	uint8_t *tag;
	unsigned int oop = 0;
	struct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];

	/* AEAD ops carry offset/length in the aead union member,
	 * GMAC ops in the auth member.
	 */
	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
			session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
		offset = sym_op->aead.data.offset;
		data_offset = offset;
		data_length = sym_op->aead.data.length;
	} else {
		offset = sym_op->auth.data.offset;
		data_offset = offset;
		data_length = sym_op->auth.data.length;
	}

	RTE_ASSERT(m_src != NULL);

	/* Skip whole segments that lie before the data offset. */
	while (offset >= m_src->data_len && data_length != 0) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	/* part_len = bytes of payload available in this first segment. */
	data_len = m_src->data_len - offset;
	part_len = (data_len < data_length) ? data_len :
			data_length;

	RTE_ASSERT((sym_op->m_dst == NULL) ||
			((sym_op->m_dst != NULL) &&
				rte_pktmbuf_is_contiguous(sym_op->m_dst)));

	/* In-place */
	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
		dst = src;
	/* Out-of-place */
	else {
		oop = 1;
		/* Segmented destination buffer is not supported
		 * if operation is Out-of-place
		 */
		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
					data_offset);
	}

	/* IV lives in the op's private area at the session's offset. */
	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);

	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
		ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
				sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		/* First segment, then continue across the mbuf chain. */
		ops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
				dst, src, (uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			/* OOP dst is contiguous: just advance; in-place
			 * follows the source segment.
			 */
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->update_enc(&session->gdata_key,
					&qp_data->gcm_ctx_data,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

		/* Truncated tags are finalized into scratch and copied
		 * out later by post-processing.
		 */
		if (session->req_digest_length != session->gen_digest_length)
			tag = qp_data->temp_digest;
		else
			tag = sym_op->aead.digest.data;

		ops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	} else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
		ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
				sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		ops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
				dst, src, (uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->update_dec(&session->gdata_key,
					&qp_data->gcm_ctx_data,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

		/* Decrypt always computes into scratch; post-processing
		 * compares it against the supplied digest.
		 */
		tag = qp_data->temp_digest;
		ops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	} else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
		/* GMAC generate: data is authenticated, never copied. */
		ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
				iv_ptr, session->iv.length);

		ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
				src, (uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->gmac_update(&session->gdata_key,
					&qp_data->gcm_ctx_data, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		if (session->req_digest_length != session->gen_digest_length)
			tag = qp_data->temp_digest;
		else
			tag = sym_op->auth.digest.data;

		ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	} else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
		ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
				iv_ptr, session->iv.length);

		ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
				src, (uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->gmac_update(&session->gdata_key,
					&qp_data->gcm_ctx_data, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		/* Verify path: compute into scratch for later comparison. */
		tag = qp_data->temp_digest;

		ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	}
	return 0;
}
452
453 /** Get gcm session */
454 static inline struct aesni_gcm_session *
455 aesni_gcm_get_session(struct ipsec_mb_qp *qp,
456              struct rte_crypto_op *op)
457 {
458         struct aesni_gcm_session *sess = NULL;
459         uint32_t driver_id =
460             ipsec_mb_get_driver_id(IPSEC_MB_PMD_TYPE_AESNI_GCM);
461         struct rte_crypto_sym_op *sym_op = op->sym;
462
463         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
464                 if (likely(sym_op->session != NULL))
465                         sess = (struct aesni_gcm_session *)
466                             get_sym_session_private_data(sym_op->session,
467                                                          driver_id);
468         } else {
469                 void *_sess;
470                 void *_sess_private_data = NULL;
471
472                 if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
473                         return NULL;
474
475                 if (rte_mempool_get(qp->sess_mp_priv,
476                                 (void **)&_sess_private_data))
477                         return NULL;
478
479                 sess = (struct aesni_gcm_session *)_sess_private_data;
480
481                 if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
482                                  _sess_private_data, sym_op->xform) != 0)) {
483                         rte_mempool_put(qp->sess_mp, _sess);
484                         rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
485                         sess = NULL;
486                 }
487                 sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
488                 set_sym_session_private_data(sym_op->session, driver_id,
489                                              _sess_private_data);
490         }
491
492         if (unlikely(sess == NULL))
493                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
494
495         return sess;
496 }
497
498 static uint16_t
499 aesni_gcm_pmd_dequeue_burst(void *queue_pair,
500                 struct rte_crypto_op **ops, uint16_t nb_ops)
501 {
502         struct aesni_gcm_session *sess;
503         struct ipsec_mb_qp *qp = queue_pair;
504
505         int retval = 0;
506         unsigned int i, nb_dequeued;
507
508         nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
509                         (void **)ops, nb_ops, NULL);
510
511         for (i = 0; i < nb_dequeued; i++) {
512
513                 sess = aesni_gcm_get_session(qp, ops[i]);
514                 if (unlikely(sess == NULL)) {
515                         ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
516                         qp->stats.dequeue_err_count++;
517                         break;
518                 }
519
520                 retval = process_gcm_crypto_op(qp, ops[i], sess);
521                 if (retval < 0) {
522                         ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
523                         qp->stats.dequeue_err_count++;
524                         break;
525                 }
526
527                 handle_completed_gcm_crypto_op(qp, ops[i], sess);
528         }
529
530         qp->stats.dequeued_count += i;
531
532         return i;
533 }
534
535 static inline void
536 aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
537                           int32_t errnum)
538 {
539         uint32_t i;
540
541         for (i = 0; i < vec->num; i++)
542                 vec->status[i] = errnum;
543 }
544
545 static inline int32_t
546 aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
547                                      struct gcm_context_data *gdata_ctx,
548                                      uint8_t *digest, struct aesni_gcm_ops ops)
549 {
550         if (s->req_digest_length != s->gen_digest_length) {
551                 uint8_t tmpdigest[s->gen_digest_length];
552
553                 ops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,
554                                 s->gen_digest_length);
555                 memcpy(digest, tmpdigest, s->req_digest_length);
556         } else {
557                 ops.finalize_enc(&s->gdata_key, gdata_ctx, digest,
558                                 s->gen_digest_length);
559         }
560
561         return 0;
562 }
563
564 static inline int32_t
565 aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
566                                      struct gcm_context_data *gdata_ctx,
567                                      uint8_t *digest, struct aesni_gcm_ops ops)
568 {
569         uint8_t tmpdigest[s->gen_digest_length];
570
571         ops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,
572                         s->gen_digest_length);
573
574         return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
575                                                                     : EBADMSG;
576 }
577
578 static inline void
579 aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
580                              struct gcm_context_data *gdata_ctx,
581                              struct rte_crypto_sgl *sgl, void *iv, void *aad,
582                              struct aesni_gcm_ops ops)
583 {
584         uint32_t i;
585
586         /* init crypto operation */
587         ops.init(&s->gdata_key, gdata_ctx, iv, aad,
588                     (uint64_t)s->aad_length);
589
590         /* update with sgl data */
591         for (i = 0; i < sgl->num; i++) {
592                 struct rte_crypto_vec *vec = &sgl->vec[i];
593
594                 switch (s->op) {
595                 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
596                         ops.update_enc(&s->gdata_key, gdata_ctx,
597                               vec->base, vec->base, vec->len);
598                         break;
599                 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
600                         ops.update_dec(&s->gdata_key, gdata_ctx,
601                               vec->base, vec->base, vec->len);
602                         break;
603                 default:
604                         IPSEC_MB_LOG(ERR, "Invalid session op");
605                         break;
606                 }
607
608         }
609 }
610
611 static inline void
612 aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
613                               struct gcm_context_data *gdata_ctx,
614                               struct rte_crypto_sgl *sgl, void *iv,
615                               struct aesni_gcm_ops ops)
616 {
617         ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
618                     sgl->vec[0].len);
619 }
620
621 static inline uint32_t
622 aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
623                       struct gcm_context_data *gdata_ctx,
624                       struct rte_crypto_sym_vec *vec,
625                       struct aesni_gcm_ops ops)
626 {
627         uint32_t i, processed;
628
629         processed = 0;
630         for (i = 0; i < vec->num; ++i) {
631                 aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
632                                              vec->iv[i].va, vec->aad[i].va,
633                                              ops);
634                 vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
635                     s, gdata_ctx, vec->digest[i].va, ops);
636                 processed += (vec->status[i] == 0);
637         }
638
639         return processed;
640 }
641
642 static inline uint32_t
643 aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
644                       struct gcm_context_data *gdata_ctx,
645                       struct rte_crypto_sym_vec *vec,
646                       struct aesni_gcm_ops ops)
647 {
648         uint32_t i, processed;
649
650         processed = 0;
651         for (i = 0; i < vec->num; ++i) {
652                 aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
653                                              vec->iv[i].va, vec->aad[i].va,
654                                              ops);
655                 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
656                     s, gdata_ctx, vec->digest[i].va, ops);
657                 processed += (vec->status[i] == 0);
658         }
659
660         return processed;
661 }
662
663 static inline uint32_t
664 aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
665                         struct gcm_context_data *gdata_ctx,
666                         struct rte_crypto_sym_vec *vec,
667                         struct aesni_gcm_ops ops)
668 {
669         uint32_t i, processed;
670
671         processed = 0;
672         for (i = 0; i < vec->num; ++i) {
673                 if (vec->src_sgl[i].num != 1) {
674                         vec->status[i] = ENOTSUP;
675                         continue;
676                 }
677
678                 aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
679                                               vec->iv[i].va, ops);
680                 vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
681                     s, gdata_ctx, vec->digest[i].va, ops);
682                 processed += (vec->status[i] == 0);
683         }
684
685         return processed;
686 }
687
688 static inline uint32_t
689 aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
690                       struct gcm_context_data *gdata_ctx,
691                       struct rte_crypto_sym_vec *vec,
692                       struct aesni_gcm_ops ops)
693 {
694         uint32_t i, processed;
695
696         processed = 0;
697         for (i = 0; i < vec->num; ++i) {
698                 if (vec->src_sgl[i].num != 1) {
699                         vec->status[i] = ENOTSUP;
700                         continue;
701                 }
702
703                 aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
704                                               vec->iv[i].va, ops);
705                 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
706                     s, gdata_ctx, vec->digest[i].va, ops);
707                 processed += (vec->status[i] == 0);
708         }
709
710         return processed;
711 }
712
713 /** Process CPU crypto bulk operations */
714 static uint32_t
715 aesni_gcm_process_bulk(struct rte_cryptodev *dev,
716                         struct rte_cryptodev_sym_session *sess,
717                         __rte_unused union rte_crypto_sym_ofs ofs,
718                         struct rte_crypto_sym_vec *vec)
719 {
720         struct aesni_gcm_session *s;
721         struct gcm_context_data gdata_ctx;
722         IMB_MGR *mb_mgr;
723
724         s = (struct aesni_gcm_session *) get_sym_session_private_data(sess,
725                 dev->driver_id);
726         if (unlikely(s == NULL)) {
727                 aesni_gcm_fill_error_code(vec, EINVAL);
728                 return 0;
729         }
730
731         /* get per-thread MB MGR, create one if needed */
732         mb_mgr = get_per_thread_mb_mgr();
733         if (unlikely(mb_mgr == NULL))
734                 return 0;
735
736         /* Check if function pointers have been set for this thread ops. */
737         if (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))
738                 aesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);
739
740         switch (s->op) {
741         case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
742                 return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,
743                                 RTE_PER_LCORE(gcm_ops)[s->key_length]);
744         case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
745                 return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,
746                                 RTE_PER_LCORE(gcm_ops)[s->key_length]);
747         case IPSEC_MB_OP_HASH_GEN_ONLY:
748                 return aesni_gmac_sgl_generate(s, &gdata_ctx, vec,
749                                 RTE_PER_LCORE(gcm_ops)[s->key_length]);
750         case IPSEC_MB_OP_HASH_VERIFY_ONLY:
751                 return aesni_gmac_sgl_verify(s, &gdata_ctx, vec,
752                                 RTE_PER_LCORE(gcm_ops)[s->key_length]);
753         default:
754                 aesni_gcm_fill_error_code(vec, EINVAL);
755                 return 0;
756         }
757 }
758
759 static int
760 aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
761                                 const struct rte_cryptodev_qp_conf *qp_conf,
762                                 int socket_id)
763 {
764         int ret = ipsec_mb_qp_setup(dev, qp_id, qp_conf, socket_id);
765         if (ret < 0)
766                 return ret;
767
768         struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
769         struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
770         aesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);
771         return 0;
772 }
773
/* Cryptodev operations table for the AESNI-GCM PMD.  Most entries reuse
 * the shared ipsec_mb implementations; queue pair setup and CPU-crypto
 * processing are GCM-specific.
 */
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	/* GCM-specific: also populates the per-qp ops table. */
	.queue_pair_setup = aesni_gcm_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	/* Synchronous CPU crypto path (no enqueue/dequeue). */
	.sym_cpu_process = aesni_gcm_process_bulk,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};
794
/* vdev probe callback: all device creation is shared ipsec_mb logic,
 * parameterized only by the PMD type.
 */
static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_GCM);
}
800
/* Virtual device driver: probe is GCM-specific, removal is shared. */
static struct rte_vdev_driver cryptodev_aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver aesni_gcm_crypto_drv;

/* Register the vdev driver, its legacy alias, accepted devargs, and the
 * cryptodev driver id used for session private-data lookup.
 */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD,
		      cryptodev_aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
			      "max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv,
			       cryptodev_aesni_gcm_pmd_drv.driver,
			       pmd_driver_id_aesni_gcm);
816
817 /* Constructor function to register aesni-gcm PMD */
818 RTE_INIT(ipsec_mb_register_aesni_gcm)
819 {
820         struct ipsec_mb_internals *aesni_gcm_data =
821                 &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];
822
823         aesni_gcm_data->caps = aesni_gcm_capabilities;
824         aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
825         aesni_gcm_data->feature_flags =
826                 RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
827                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
828                 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
829                 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
830                 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
831                 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
832                 RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
833         aesni_gcm_data->internals_priv_size = 0;
834         aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
835         aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
836         aesni_gcm_data->queue_pair_configure = NULL;
837         aesni_gcm_data->session_configure = aesni_gcm_session_configure;
838         aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);
839 }