drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_mbuf_offload.h>

#include "rte_aesni_mb_pmd_private.h"

/**
 * Global static counter used to create a unique name for each AES-NI
 * multi-buffer crypto device.
 */
static unsigned unique_name_id;

static inline int
create_unique_device_name(char *name, size_t size)
{
        int ret;

        if (name == NULL)
                return -EINVAL;

        ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_MB_PMD,
                        unique_name_id++);
        if (ret < 0)
                return ret;
        return 0;
}

typedef void (*hash_one_block_t)(void *data, void *digest);
typedef void (*aes_keyexp_t)(void *key, void *enc_exp_keys, void *dec_exp_keys);

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash        Function pointer to calculate digest on ipad/opad
 * @param ipad                  Inner pad output byte array
 * @param opad                  Outer pad output byte array
 * @param hkey                  Authentication key
 * @param hkey_len              Authentication key length
 * @param blocksize             Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
                uint8_t *ipad, uint8_t *opad,
                uint8_t *hkey, uint16_t hkey_len,
                uint16_t blocksize)
{
        unsigned i, length;

        uint8_t ipad_buf[blocksize] __rte_aligned(16);
        uint8_t opad_buf[blocksize] __rte_aligned(16);

        /* Setup inner and outer pads */
        memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
        memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

        /* XOR hash key with inner and outer pads */
        length = hkey_len > blocksize ? blocksize : hkey_len;

        for (i = 0; i < length; i++) {
                ipad_buf[i] ^= hkey[i];
                opad_buf[i] ^= hkey[i];
        }

        /* Compute partial hashes */
        (*one_block_hash)(ipad_buf, ipad);
        (*one_block_hash)(opad_buf, opad);

        /* Clean up stack */
        memset(ipad_buf, 0, blocksize);
        memset(opad_buf, 0, blocksize);
}
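
/*
 * Background note: the pre-computes above are the two partial HMAC states
 * defined in RFC 2104, where for key K, message m and hash function H:
 *
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * Hashing one block of (K ^ ipad) and (K ^ opad) ahead of time means the
 * per-operation path only has to continue the two hashes over the message,
 * instead of re-deriving the padded keys for every packet.
 */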

/** Get xform chain order */
static int
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
        /*
         * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
         * operations; all other chains are invalid, so there must be exactly
         * two xform structs chained together
         */
        if (xform->next == NULL || xform->next->next != NULL)
                return -1;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
                return HASH_CIPHER;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                                xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
                return CIPHER_HASH;

        return -1;
}
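
/*
 * For reference, a minimal sketch of how an application might build a
 * CIPHER_HASH chain accepted by the check above (the key buffers and
 * lengths are illustrative assumptions, not values mandated by this PMD):
 *
 *   struct rte_crypto_sym_xform auth_xform = {
 *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *           .next = NULL,
 *           .auth = {
 *                   .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *                   .key = { .data = auth_key, .length = 20 },
 *           },
 *   };
 *
 *   struct rte_crypto_sym_xform cipher_xform = {
 *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *           .next = &auth_xform,
 *           .cipher = {
 *                   .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *                   .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *                   .key = { .data = cipher_key, .length = AES_128_BYTES },
 *           },
 *   };
 *
 * aesni_mb_get_chain_order(&cipher_xform) then returns CIPHER_HASH.
 */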

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
                struct aesni_mb_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        hash_one_block_t hash_oneblock_fn;

        if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
                MB_LOG_ERR("Crypto xform struct not of type auth");
                return -1;
        }

        /* Set Authentication Parameters */
        if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
                sess->auth.algo = AES_XCBC;
                (*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
                                sess->auth.xcbc.k1_expanded,
                                sess->auth.xcbc.k2, sess->auth.xcbc.k3);
                return 0;
        }

        switch (xform->auth.algo) {
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                sess->auth.algo = MD5;
                hash_oneblock_fn = mb_ops->aux.one_block.md5;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                sess->auth.algo = SHA1;
                hash_oneblock_fn = mb_ops->aux.one_block.sha1;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                sess->auth.algo = SHA_224;
                hash_oneblock_fn = mb_ops->aux.one_block.sha224;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                sess->auth.algo = SHA_256;
                hash_oneblock_fn = mb_ops->aux.one_block.sha256;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                sess->auth.algo = SHA_384;
                hash_oneblock_fn = mb_ops->aux.one_block.sha384;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                sess->auth.algo = SHA_512;
                hash_oneblock_fn = mb_ops->aux.one_block.sha512;
                break;
        default:
                MB_LOG_ERR("Unsupported authentication algorithm selection");
                return -1;
        }

        /* Calculate Authentication precomputes */
        calculate_auth_precomputes(hash_oneblock_fn,
                        sess->auth.pads.inner, sess->auth.pads.outer,
                        xform->auth.key.data,
                        xform->auth.key.length,
                        get_auth_algo_blocksize(sess->auth.algo));

        return 0;
}

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
                struct aesni_mb_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        aes_keyexp_t aes_keyexp_fn;

        if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
                MB_LOG_ERR("Crypto xform struct not of type cipher");
                return -1;
        }

        /* Select cipher direction */
        switch (xform->cipher.op) {
        case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
                sess->cipher.direction = ENCRYPT;
                break;
        case RTE_CRYPTO_CIPHER_OP_DECRYPT:
                sess->cipher.direction = DECRYPT;
                break;
        default:
                MB_LOG_ERR("Unsupported cipher operation parameter");
                return -1;
        }

        /* Select cipher mode */
        switch (xform->cipher.algo) {
        case RTE_CRYPTO_CIPHER_AES_CBC:
                sess->cipher.mode = CBC;
                break;
        default:
                MB_LOG_ERR("Unsupported cipher mode parameter");
                return -1;
        }

        /* Check key length and choose key expansion function */
        switch (xform->cipher.key.length) {
        case AES_128_BYTES:
                sess->cipher.key_length_in_bytes = AES_128_BYTES;
                aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
                break;
        case AES_192_BYTES:
                sess->cipher.key_length_in_bytes = AES_192_BYTES;
                aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
                break;
        case AES_256_BYTES:
                sess->cipher.key_length_in_bytes = AES_256_BYTES;
                aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
                break;
        default:
                MB_LOG_ERR("Unsupported cipher key length");
                return -1;
        }

        /* Expand cipher keys */
        (*aes_keyexp_fn)(xform->cipher.key.data,
                        sess->cipher.expanded_aes_keys.encode,
                        sess->cipher.expanded_aes_keys.decode);

        return 0;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
                struct aesni_mb_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *cipher_xform = NULL;

        /* Select Crypto operation - hash then cipher / cipher then hash */
        switch (aesni_mb_get_chain_order(xform)) {
        case HASH_CIPHER:
                sess->chain_order = HASH_CIPHER;
                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        case CIPHER_HASH:
                sess->chain_order = CIPHER_HASH;
                auth_xform = xform->next;
                cipher_xform = xform;
                break;
        default:
                MB_LOG_ERR("Unsupported operation chain order parameter");
                return -1;
        }

        if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
                MB_LOG_ERR("Invalid/unsupported authentication parameters");
                return -1;
        }

        if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
                        cipher_xform)) {
                MB_LOG_ERR("Invalid/unsupported cipher parameters");
                return -1;
        }
        return 0;
}
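
/*
 * Note: this is the only non-static function in this file; the companion
 * ops layer (rte_aesni_mb_pmd_ops.c) is expected to call it when an
 * application configures a session. A hypothetical sketch of such a
 * callback, assuming a job_ops table indexed by vector mode:
 *
 *   static void *
 *   aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
 *                   struct rte_crypto_sym_xform *xform, void *sess)
 *   {
 *           struct aesni_mb_private *internals = dev->data->dev_private;
 *
 *           if (aesni_mb_set_session_parameters(
 *                           &job_ops[internals->vector_mode],
 *                           sess, xform) != 0)
 *                   return NULL;
 *
 *           return sess;
 *   }
 */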

/** Get multi buffer session */
static struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
        struct aesni_mb_session *sess = NULL;

        if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
                if (unlikely(op->sym->session->type !=
                                RTE_CRYPTODEV_AESNI_MB_PMD))
                        return NULL;

                sess = (struct aesni_mb_session *)op->sym->session->_private;
        } else {
                void *_sess = NULL;

                if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
                        return NULL;

                sess = (struct aesni_mb_session *)
                        ((struct rte_cryptodev_sym_session *)_sess)->_private;

                if (unlikely(aesni_mb_set_session_parameters(qp->ops,
                                sess, op->sym->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        sess = NULL;
                }
        }

        return sess;
}

/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param       qp      queue pair
 * @param       op      symmetric crypto operation to process
 * @param       session session describing the crypto transforms to apply
 *
 * @return
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
                struct aesni_mb_session *session)
{
        JOB_AES_HMAC *job;

        struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
        uint16_t m_offset = 0;

        job = (*qp->ops->job.get_next)(&qp->mb_mgr);
        if (unlikely(job == NULL))
                return job;

        /* Set crypto operation */
        job->chain_order = session->chain_order;

        /* Set cipher parameters */
        job->cipher_direction = session->cipher.direction;
        job->cipher_mode = session->cipher.mode;

        job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
        job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
        job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

        /* Set authentication parameters */
        job->hash_alg = session->auth.algo;
        if (job->hash_alg == AES_XCBC) {
                job->_k1_expanded = session->auth.xcbc.k1_expanded;
                job->_k2 = session->auth.xcbc.k2;
                job->_k3 = session->auth.xcbc.k3;
        } else {
                job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
                job->hashed_auth_key_xor_opad = session->auth.pads.outer;
        }

        /* Mutable crypto operation parameters */
        if (op->sym->m_dst) {
                m_src = m_dst = op->sym->m_dst;

                /* append space for output data to mbuf */
                char *odata = rte_pktmbuf_append(m_dst,
                                rte_pktmbuf_data_len(op->sym->m_src));
                if (odata == NULL) {
                        MB_LOG_ERR("failed to allocate space in destination "
                                        "mbuf for source data");
                        return NULL;
                }

                memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
                                rte_pktmbuf_data_len(op->sym->m_src));
        } else {
                m_dst = m_src;
                m_offset = op->sym->cipher.data.offset;
        }

        /* Set digest output location */
        if (job->cipher_direction == DECRYPT) {
                job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
                                get_digest_byte_length(job->hash_alg));

                if (job->auth_tag_output == NULL) {
                        MB_LOG_ERR("failed to allocate space in output mbuf "
                                        "for temp digest");
                        return NULL;
                }

                memset(job->auth_tag_output, 0,
                                get_digest_byte_length(job->hash_alg));

        } else {
                job->auth_tag_output = op->sym->auth.digest.data;
        }

        /*
         * The multi-buffer library currently only supports returning a
         * truncated digest length, as specified in the relevant IPsec RFCs
         */
        job->auth_tag_output_len_in_bytes =
                        get_truncated_digest_byte_length(job->hash_alg);

        /* Set IV parameters */
        job->iv = op->sym->cipher.iv.data;
        job->iv_len_in_bytes = op->sym->cipher.iv.length;

        /* Data parameters */
        job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
        job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

        job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
        job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

        job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
        job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

        /* Set user data to be crypto operation data struct */
        job->user_data = op;
        job->user_data2 = m_dst;

        return job;
}

/**
 * Process a completed job and return the crypto operation it relates to
 *
 * @param qp    Queue pair the job was processed on
 * @param job   JOB_AES_HMAC job to process
 *
 * @return
 * - Returns the processed crypto operation; for a HASH_CIPHER operation the
 * destination mbuf is trimmed of the temporary digest that was appended for
 * verification against the supplied digest
 * - Returns NULL on invalid job
 */
static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
        struct rte_crypto_op *op =
                        (struct rte_crypto_op *)job->user_data;
        struct rte_mbuf *m_dst =
                        (struct rte_mbuf *)job->user_data2;

        if (op == NULL || m_dst == NULL)
                return NULL;

        /* set status as successful by default */
        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

        /* check if job has been processed */
        if (unlikely(job->status != STS_COMPLETED)) {
                op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                return op;
        } else if (job->chain_order == HASH_CIPHER) {
                /* Verify digest if required */
                if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
                                job->auth_tag_output_len_in_bytes) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

                /* trim area used for digest from mbuf */
                rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
        }

        /* Free session if a session-less crypto op */
        if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }

        return op;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp            Queue Pair to process
 * @param job           JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
        struct rte_crypto_op *op = NULL;
        unsigned processed_jobs = 0;

        while (job) {
                processed_jobs++;
                op = post_process_mb_job(qp, job);
                if (op)
                        rte_ring_enqueue(qp->processed_ops, (void *)op);
                else
                        qp->stats.dequeue_err_count++;
                job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
        }

        return processed_jobs;
}
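
/*
 * Background note on the submit/flush pattern used in the enqueue path
 * below: the multi-buffer manager pipelines jobs internally, so a submit
 * call may return NULL until enough jobs have accumulated. The general
 * shape, sketched here purely for illustration:
 *
 *   job = (*qp->ops->job.submit)(&qp->mb_mgr);
 *   if (job)                                  // a job completed, drain it
 *           handle_completed_jobs(qp, job);
 *   ...
 *   job = (*qp->ops->job.flush_job)(&qp->mb_mgr);  // force partial pipeline out
 *
 * Flushing trades batching efficiency for bounded latency, which is why the
 * enqueue path below only flushes when no job has completed naturally.
 */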

static uint16_t
aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct aesni_mb_session *sess;
        struct aesni_mb_qp *qp = queue_pair;

        JOB_AES_HMAC *job = NULL;

        int i, processed_jobs = 0;

        for (i = 0; i < nb_ops; i++) {
#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
                if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
                        MB_LOG_ERR("PMD only supports symmetric crypto "
                                "operation requests, op (%p) is not a "
                                "symmetric operation.", ops[i]);
                        qp->stats.enqueue_err_count++;
                        goto flush_jobs;
                }
#endif
                sess = get_session(qp, ops[i]);
                if (unlikely(sess == NULL)) {
                        qp->stats.enqueue_err_count++;
                        goto flush_jobs;
                }

                job = process_crypto_op(qp, ops[i], sess);
                if (unlikely(job == NULL)) {
                        qp->stats.enqueue_err_count++;
                        goto flush_jobs;
                }

                /* Submit Job */
                job = (*qp->ops->job.submit)(&qp->mb_mgr);

                /*
                 * If submit returns a processed job, handle it before
                 * submitting subsequent jobs
                 */
                if (job)
                        processed_jobs += handle_completed_jobs(qp, job);
        }

        if (processed_jobs == 0)
                goto flush_jobs;

        qp->stats.enqueued_count += processed_jobs;
        return i;

flush_jobs:
        /*
         * If no jobs were processed in the submit loop, flush the job queue
         * to prevent the output from stalling
         */
        job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
        if (job)
                qp->stats.enqueued_count += handle_completed_jobs(qp, job);

        return i;
}

static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct aesni_mb_qp *qp = queue_pair;

        unsigned nb_dequeued;

        nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
                        (void **)ops, nb_ops);
        qp->stats.dequeued_count += nb_dequeued;

        return nb_dequeued;
}
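
/*
 * For context, applications reach these handlers through the generic
 * cryptodev burst API rather than calling them directly; a minimal usage
 * sketch (dev_id, qp_id and the burst size of 32 are illustrative
 * assumptions):
 *
 *   struct rte_crypto_op *ops[32];
 *   uint16_t nb_enq, nb_deq;
 *
 *   nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, 32);
 *   ...
 *   nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
 *
 * Enqueue submits jobs to the multi-buffer manager, completed operations
 * are staged on the qp->processed_ops ring, and dequeue drains that ring.
 */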

static int cryptodev_aesni_mb_uninit(const char *name);

static int
cryptodev_aesni_mb_create(const char *name, unsigned socket_id)
{
        struct rte_cryptodev *dev;
        char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        struct aesni_mb_private *internals;
        enum aesni_mb_vector_mode vector_mode;

        /* Check CPU support for the AES instruction set */
        if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
                MB_LOG_ERR("AES instructions not supported by CPU");
                return -EFAULT;
        }

        /* Check CPU for supported vector instruction set */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                vector_mode = RTE_AESNI_MB_AVX2;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
                vector_mode = RTE_AESNI_MB_AVX;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
                vector_mode = RTE_AESNI_MB_SSE;
        else {
                MB_LOG_ERR("Vector instructions are not supported by CPU");
                return -EFAULT;
        }

        /* create a unique device name */
        if (create_unique_device_name(crypto_dev_name,
                        RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
                MB_LOG_ERR("failed to create unique cryptodev name");
                return -EINVAL;
        }

        dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
                        sizeof(struct aesni_mb_private), socket_id);
        if (dev == NULL) {
                MB_LOG_ERR("failed to create cryptodev vdev");
                goto init_error;
        }

        dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
        dev->dev_ops = rte_aesni_mb_pmd_ops;

        /* register rx/tx burst functions for data path */
        dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
        dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

        /* Set vector instructions mode supported */
        internals = dev->data->dev_private;

        internals->vector_mode = vector_mode;
        internals->max_nb_queue_pairs = RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS;
        internals->max_nb_sessions = RTE_AESNI_MB_PMD_MAX_NB_SESSIONS;

        return 0;
init_error:
        MB_LOG_ERR("driver %s: cryptodev_aesni_mb_create failed", name);

        cryptodev_aesni_mb_uninit(crypto_dev_name);
        return -EFAULT;
}

static int
cryptodev_aesni_mb_init(const char *name,
                const char *params __rte_unused)
{
        RTE_LOG(INFO, PMD, "Initialising %s\n", name);

        return cryptodev_aesni_mb_create(name, rte_socket_id());
}

static int
cryptodev_aesni_mb_uninit(const char *name)
{
        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
                        name, rte_socket_id());

        return 0;
}

static struct rte_driver cryptodev_aesni_mb_pmd_drv = {
        .name = CRYPTODEV_NAME_AESNI_MB_PMD,
        .type = PMD_VDEV,
        .init = cryptodev_aesni_mb_init,
        .uninit = cryptodev_aesni_mb_uninit
};

PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv);