crypto/mrvl: add mrvl crypto driver
drivers/crypto/mrvl/rte_mrvl_pmd.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) Semihalf 2017. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Semihalf nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_mrvl_pmd_private.h"

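/*
 * The default below corresponds to 40 MiB (40 * 1024 * 1024 bytes) of
 * MUSDK DMA-able memory; the #ifndef guard allows a build to override it
 * by defining RTE_MRVL_MUSDK_DMA_MEMSIZE externally.
 */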
#ifndef RTE_MRVL_MUSDK_DMA_MEMSIZE
#define RTE_MRVL_MUSDK_DMA_MEMSIZE 41943040
#endif

static uint8_t cryptodev_driver_id;

/**
 * Flag whether a particular crypto algorithm is supported by the PMD/MUSDK.
 *
 * Not Supported is deliberately the default value (0), so the maps below only
 * need the right sizes; any entry left uninitialized is reported as not
 * supported.
 */
enum algo_supported {
	ALGO_NOT_SUPPORTED = 0,
	ALGO_SUPPORTED = 1,
};

/** Map elements for cipher mapping. */
struct cipher_params_mapping {
	enum algo_supported  supported;   /**< On/Off switch */
	enum sam_cipher_alg  cipher_alg;  /**< Cipher algorithm */
	enum sam_cipher_mode cipher_mode; /**< Cipher mode */
	unsigned int max_key_len;         /**< Maximum key length (in bytes) */
}
/* We want to squeeze multiple map entries into a cache line. */
__rte_aligned(32);

/** Map elements for auth mapping. */
struct auth_params_mapping {
	enum algo_supported supported;  /**< On/off switch */
	enum sam_auth_alg   auth_alg;   /**< Auth algorithm */
}
/* We want to squeeze multiple map entries into a cache line. */
__rte_aligned(32);

/**
 * Map of supported cipher algorithms.
 */
static const
struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_ECB] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_ECB,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_AES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(256) },
	[RTE_CRYPTO_CIPHER_AES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(256) },
};

/**
 * Map of supported auth algorithms.
 */
static const
struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
	[RTE_CRYPTO_AUTH_MD5_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_MD5 },
	[RTE_CRYPTO_AUTH_MD5] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_MD5 },
	[RTE_CRYPTO_AUTH_SHA1_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA1 },
	[RTE_CRYPTO_AUTH_SHA1] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA1 },
	[RTE_CRYPTO_AUTH_SHA224] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_224 },
	[RTE_CRYPTO_AUTH_SHA256_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA256] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA384_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA384] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA512_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_512 },
	[RTE_CRYPTO_AUTH_SHA512] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_512 },
	[RTE_CRYPTO_AUTH_AES_GMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_AES_GMAC },
};

/**
 * Map of supported aead algorithms.
 */
static const
struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
	[RTE_CRYPTO_AEAD_AES_GCM] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_GCM,
		.max_key_len = BITS2BYTES(256) },
};
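
/*
 * Example lookup: RTE_CRYPTO_CIPHER_AES_CBC resolves to
 * cipher_map[RTE_CRYPTO_CIPHER_AES_CBC], i.e. SAM_CIPHER_AES in
 * SAM_CIPHER_CBC mode with max_key_len = BITS2BYTES(256) (32 bytes, assuming
 * BITS2BYTES() converts bits to bytes). Any algorithm without an explicit
 * initializer stays zeroed and is therefore ALGO_NOT_SUPPORTED.
 */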

/*
 *-----------------------------------------------------------------------------
 * Forward declarations.
 *-----------------------------------------------------------------------------
 */
static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);

/*
 *-----------------------------------------------------------------------------
 * Session Preparation.
 *-----------------------------------------------------------------------------
 */

/**
 * Get xform chain order.
 *
 * @param xform Pointer to configuration structure chain for crypto operations.
 * @returns Order of crypto operations.
 */
static enum mrvl_crypto_chain_order
mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	/* Currently, Marvell supports max 2 operations in chain */
	if (xform->next != NULL && xform->next->next != NULL)
		return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;

	if (xform->next != NULL) {
		if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
			(xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
			return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;

		if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
			(xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
			return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
	} else {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return MRVL_CRYPTO_CHAIN_AUTH_ONLY;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
			return MRVL_CRYPTO_CHAIN_COMBINED;
	}
	return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
}
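
/*
 * Example: an xform list {CIPHER -> AUTH} maps to
 * MRVL_CRYPTO_CHAIN_CIPHER_AUTH, {AUTH -> CIPHER} to
 * MRVL_CRYPTO_CHAIN_AUTH_CIPHER, a single AEAD xform to
 * MRVL_CRYPTO_CHAIN_COMBINED, and any chain of more than two xforms is
 * rejected as MRVL_CRYPTO_CHAIN_NOT_SUPPORTED.
 */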

/**
 * Set session parameters for cipher part.
 *
 * @param sess Crypto session pointer.
 * @param cipher_xform Pointer to configuration structure for cipher operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
		const struct rte_crypto_sym_xform *cipher_xform)
{
	/* Make sure we've got proper struct */
	if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
		return -EINVAL;
	}

	/* See if map data is present and valid */
	if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
		(cipher_map[cipher_xform->cipher.algo].supported
			!= ALGO_SUPPORTED)) {
		MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
		return -EINVAL;
	}

	sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

	sess->sam_sess_params.dir =
		(cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
		SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
	sess->sam_sess_params.cipher_alg =
		cipher_map[cipher_xform->cipher.algo].cipher_alg;
	sess->sam_sess_params.cipher_mode =
		cipher_map[cipher_xform->cipher.algo].cipher_mode;

	/* Assume IV will be passed together with data. */
	sess->sam_sess_params.cipher_iv = NULL;

	/* Verify the key length against the supported maximum. */
	if (cipher_xform->cipher.key.length >
		cipher_map[cipher_xform->cipher.algo].max_key_len) {
		MRVL_CRYPTO_LOG_ERR("Wrong key length!");
		return -EINVAL;
	}

	sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
	sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;

	return 0;
}

/**
 * Set session parameters for authentication part.
 *
 * @param sess Crypto session pointer.
 * @param auth_xform Pointer to configuration structure for auth operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
		const struct rte_crypto_sym_xform *auth_xform)
{
	/* Make sure we've got proper struct */
	if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
		return -EINVAL;
	}

	/* See if map data is present and valid */
	if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
		(auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
		MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
		return -EINVAL;
	}

	sess->sam_sess_params.dir =
		(auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
		SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
	sess->sam_sess_params.auth_alg =
		auth_map[auth_xform->auth.algo].auth_alg;
	sess->sam_sess_params.u.basic.auth_icv_len =
		auth_xform->auth.digest_length;
	/* auth_key must be NULL if auth algorithm does not use HMAC */
	sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
					 auth_xform->auth.key.data : NULL;
	sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;

	return 0;
}

/**
 * Set session parameters for aead part.
 *
 * @param sess Crypto session pointer.
 * @param aead_xform Pointer to configuration structure for aead operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
		const struct rte_crypto_sym_xform *aead_xform)
{
	/* Make sure we've got proper struct */
	if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
		MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
		return -EINVAL;
	}

	/* See if map data is present and valid */
	if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
		(aead_map[aead_xform->aead.algo].supported
			!= ALGO_SUPPORTED)) {
		MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
		return -EINVAL;
	}

	sess->sam_sess_params.dir =
		(aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
		SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
	sess->sam_sess_params.cipher_alg =
		aead_map[aead_xform->aead.algo].cipher_alg;
	sess->sam_sess_params.cipher_mode =
		aead_map[aead_xform->aead.algo].cipher_mode;

	/* Assume IV will be passed together with data. */
	sess->sam_sess_params.cipher_iv = NULL;

	/* Verify the key length against the supported maximum. */
	if (aead_xform->aead.key.length >
		aead_map[aead_xform->aead.algo].max_key_len) {
		MRVL_CRYPTO_LOG_ERR("Wrong key length!");
		return -EINVAL;
	}

	sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
	sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;

	if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
		sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;

	sess->sam_sess_params.u.basic.auth_icv_len =
		aead_xform->aead.digest_length;

	sess->sam_sess_params.u.basic.auth_aad_len =
		aead_xform->aead.aad_length;

	return 0;
}
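
/*
 * Example: an RTE_CRYPTO_AEAD_AES_GCM xform with a 32-byte key ends up as
 * SAM_CIPHER_AES in SAM_CIPHER_GCM mode with SAM_AUTH_AES_GCM
 * authentication; the ICV length is taken from digest_length and the AAD
 * length from aad_length.
 */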

/**
 * Parse crypto transform chain and setup session parameters.
 *
 * @param sess Pointer to crypto session.
 * @param xform Pointer to configuration structure chain for crypto operations.
 * @returns 0 in case of success, negative value otherwise.
 */
int
mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;

	/* Filter out spurious/broken requests */
	if (xform == NULL)
		return -EINVAL;

	sess->chain_order = mrvl_crypto_get_chain_order(xform);
	switch (sess->chain_order) {
	case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
		cipher_xform = xform;
		break;
	case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
		auth_xform = xform;
		break;
	case MRVL_CRYPTO_CHAIN_COMBINED:
		aead_xform = xform;
		break;
	default:
		return -EINVAL;
	}

	if ((cipher_xform != NULL) &&
		(mrvl_crypto_set_cipher_session_parameters(
			sess, cipher_xform) < 0)) {
		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
		return -EINVAL;
	}

	if ((auth_xform != NULL) &&
		(mrvl_crypto_set_auth_session_parameters(
			sess, auth_xform) < 0)) {
		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
		return -EINVAL;
	}

	if ((aead_xform != NULL) &&
		(mrvl_crypto_set_aead_session_parameters(
			sess, aead_xform) < 0)) {
		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
		return -EINVAL;
	}

	return 0;
}
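
/*
 * Illustrative (hypothetical) caller-side setup: to get an AES-CBC encrypt
 * plus SHA1-HMAC generate session, an application would chain two xforms,
 * roughly:
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		... key, digest_length, etc. ...
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		... key, iv.offset, iv.length, etc. ...
 *	};
 *
 * mrvl_crypto_set_session_parameters(sess, &cipher) then resolves the chain
 * to MRVL_CRYPTO_CHAIN_CIPHER_AUTH and fills sess->sam_sess_params via the
 * helpers above.
 */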

/*
 *-----------------------------------------------------------------------------
 * Process Operations
 *-----------------------------------------------------------------------------
 */

/**
 * Prepare a single request.
 *
 * This function basically translates a DPDK crypto request into one
 * understandable by MUSDK's SAM. If this is the first request in a session,
 * it starts the session.
 *
 * @param request Pointer to pre-allocated and zeroed request buffer [Out].
 * @param src_bd Pointer to pre-allocated source descriptor [Out].
 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
 * @param op Pointer to DPDK crypto operation struct [In].
 * @returns 0 in case of success, negative value otherwise.
 */
static inline int
mrvl_request_prepare(struct sam_cio_op_params *request,
		struct sam_buf_info *src_bd,
		struct sam_buf_info *dst_bd,
		struct rte_crypto_op *op)
{
	struct mrvl_crypto_session *sess;
	struct rte_mbuf *dst_mbuf;
	uint8_t *digest;

	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session-"
				"oriented requests, op (%p) is sessionless.",
				op);
		return -EINVAL;
	}

	sess = (struct mrvl_crypto_session *)get_session_private_data(
			op->sym->session, cryptodev_driver_id);
	if (unlikely(sess == NULL)) {
		MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
		return -EINVAL;
	}

	/*
	 * If the application delivered a NULL dst buffer, it expects us to
	 * deliver the result in the src buffer.
	 */
	dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	request->sa = sess->sam_sess;
	request->cookie = op;

	/* Single buffers only, sorry. */
	request->num_bufs = 1;
	request->src = src_bd;
	src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
	src_bd->paddr = rte_pktmbuf_mtophys(op->sym->m_src);
	src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);

	/* Empty source. */
	if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
		/* EIP does not support 0-length buffers. */
		MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
		return -1;
	}

	/* Empty destination. */
	if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
		/* Make the dst buffer fit at least the source data. */
		if (rte_pktmbuf_append(dst_mbuf,
			rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
			MRVL_CRYPTO_LOG_ERR("Unable to set a big enough dst buffer!");
			return -1;
		}
	}

	request->dst = dst_bd;
	dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
	dst_bd->paddr = rte_pktmbuf_mtophys(dst_mbuf);

	/*
	 * We can use all available space in dst_mbuf,
	 * not only what's used currently.
	 */
	dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);

	if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
		request->cipher_len = op->sym->aead.data.length;
		request->cipher_offset = op->sym->aead.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->cipher_iv_offset);

		request->auth_aad = op->sym->aead.aad.data;
		request->auth_offset = request->cipher_offset;
		request->auth_len = request->cipher_len;
	} else {
		request->cipher_len = op->sym->cipher.data.length;
		request->cipher_offset = op->sym->cipher.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				sess->cipher_iv_offset);

		request->auth_offset = op->sym->auth.data.offset;
		request->auth_len = op->sym->auth.data.length;
	}

	digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
		op->sym->aead.digest.data : op->sym->auth.digest.data;
	if (digest == NULL) {
		/* No auth - no worry. */
		return 0;
	}

	request->auth_icv_offset = request->auth_offset + request->auth_len;

	/*
	 * EIP supports only scenarios where the ICV (digest buffer) is placed
	 * at auth_icv_offset. Any other placement means risking errors.
	 */
	if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
		/*
		 * This should be the most common case anyway;
		 * EIP will overwrite the DST buffer at auth_icv_offset.
		 */
		if (rte_pktmbuf_mtod_offset(
				dst_mbuf, uint8_t *,
				request->auth_icv_offset) == digest) {
			return 0;
		}
	} else { /* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
		/*
		 * EIP will look for the digest at auth_icv_offset
		 * in the SRC buffer.
		 */
		if (rte_pktmbuf_mtod_offset(
				op->sym->m_src, uint8_t *,
				request->auth_icv_offset) == digest) {
			return 0;
		}
	}

	/*
	 * If we landed here, the digest pointer is at a different
	 * place than expected.
	 */
	return -1;
}
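
/*
 * Example of the placement rule above: with auth_offset == 0 and
 * auth_len == 64, the digest pointer supplied by the application must point
 * at byte 64 of the dst mbuf data (encryption/generation) or at byte 64 of
 * the src mbuf data (decryption/verification); any other placement makes
 * mrvl_request_prepare() reject the operation.
 */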

/*
 *-----------------------------------------------------------------------------
 * PMD Framework handlers
 *-----------------------------------------------------------------------------
 */

/**
 * Enqueue burst.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements consumed from ops.
 */
static uint16_t
mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	uint16_t iter_ops = 0;
	uint16_t to_enq = 0;
	uint16_t consumed = 0;
	int ret;
	struct sam_cio_op_params requests[nb_ops];
	/*
	 * DPDK uses single-fragment buffers, so we can keep the descriptors
	 * simple (KISS). SAM does not store bd pointers, so on-stack scope
	 * will be enough.
	 */
	struct sam_buf_info src_bd[nb_ops];
	struct sam_buf_info dst_bd[nb_ops];
	struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;

	if (nb_ops == 0)
		return 0;

	/* Prepare the burst. */
	memset(&requests, 0, sizeof(requests));

	/* Iterate through the ops. */
	for (; iter_ops < nb_ops; ++iter_ops) {
		if (mrvl_request_prepare(&requests[iter_ops],
					&src_bd[iter_ops],
					&dst_bd[iter_ops],
					ops[iter_ops]) < 0) {
			MRVL_CRYPTO_LOG_ERR(
				"Error while preparing request parameters!");
			qp->stats.enqueue_err_count++;
			ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;

			/*
			 * The number of handled ops is increased
			 * (even if handling this one resulted in an error).
			 */
			++consumed;
			break;
		}

		ops[iter_ops]->status =
			RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		/* Increase the number of ops to enqueue. */
		++to_enq;
	} /* for (; iter_ops < nb_ops;... */

	if (to_enq > 0) {
		/* Send the burst */
		ret = sam_cio_enq(qp->cio, requests, &to_enq);
		consumed += to_enq;
		if (ret < 0) {
			/*
			 * Trust SAM that the value returned in to_enq will
			 * eventually be correct (for now it is passed back
			 * unmodified).
			 */
			qp->stats.enqueue_err_count += to_enq;
			for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
				ops[iter_ops]->status =
					RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	qp->stats.enqueued_count += to_enq;
	return consumed;
}

/**
 * Dequeue burst.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements dequeued.
 */
static uint16_t
mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int ret;
	struct mrvl_crypto_qp *qp = queue_pair;
	struct sam_cio *cio = qp->cio;
	struct sam_cio_op_result results[nb_ops];
	uint16_t i;

	ret = sam_cio_deq(cio, results, &nb_ops);
	if (ret < 0) {
		/* Count all dequeued as errors. */
		qp->stats.dequeue_err_count += nb_ops;

		/* But account for them as dequeued anyway. */
		qp->stats.dequeued_count += nb_ops;

		return 0;
	}

	/* Unpack and check results. */
	for (i = 0; i < nb_ops; ++i) {
		ops[i] = results[i].cookie;

		switch (results[i].status) {
		case SAM_CIO_OK:
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case SAM_CIO_ERR_ICV:
			MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
			ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			break;
		default:
			MRVL_CRYPTO_LOG_DBG(
				"CIO returned Error: %d", results[i].status);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
			break;
		}
	}

	qp->stats.dequeued_count += nb_ops;
	return nb_ops;
}
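
/*
 * Note on request/response matching: mrvl_request_prepare() stores the
 * rte_crypto_op pointer in request->cookie and the dequeue path above reads
 * it back from results[i].cookie, so completed SAM results are matched to
 * the original DPDK ops without any extra bookkeeping in the PMD.
 */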

/**
 * Create a new crypto device.
 *
 * @param name Driver name.
 * @param vdev Pointer to device structure.
 * @param init_params Pointer to initialization parameters.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct mrvl_crypto_private *internals;
	struct sam_init_params	sam_params;
	int ret;

	if (init_params->name[0] == '\0') {
		ret = rte_cryptodev_pmd_create_dev_name(
				init_params->name, name);

		if (ret < 0) {
			MRVL_CRYPTO_LOG_ERR("failed to create unique name");
			return ret;
		}
	}

	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
				sizeof(struct mrvl_crypto_private),
				init_params->socket_id, vdev);
	if (dev == NULL) {
		MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_mrvl_crypto_pmd_ops;

	/* Register rx/tx burst functions for data path. */
	dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
	dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_HW_ACCELERATED;

	/* Set up the device's private data. */
	internals = dev->data->dev_private;

	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	/*
	 * ret == -EEXIST is fine here; it means the DMA memory has already
	 * been initialized.
	 */
	ret = mv_sys_dma_mem_init(RTE_MRVL_MUSDK_DMA_MEMSIZE);
	if ((ret < 0) && (ret != -EEXIST))
		return ret;

	sam_params.max_num_sessions = internals->max_nb_sessions;

	return sam_init(&sam_params);

init_error:
	MRVL_CRYPTO_LOG_ERR(
		"driver %s: %s failed", init_params->name, __func__);

	cryptodev_mrvl_crypto_uninit(vdev);
	return -EFAULT;
}

/**
 * Initialize the crypto device.
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
{
	struct rte_crypto_vdev_init_params init_params = { };
	const char *name;
	const char *input_args;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);

	if (!input_args)
		return -EINVAL;

	init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
	init_params.max_nb_sessions =
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	init_params.socket_id = rte_socket_id();

	ret = rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to parse input arguments\n");
		return ret;
	}

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	if (init_params.name[0] != '\0') {
		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
			init_params.name);
	}
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
}
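
/*
 * Usage sketch (hypothetical command line; the actual vdev name is the
 * string behind CRYPTODEV_NAME_MRVL_PMD):
 *
 *	./app --vdev="<CRYPTODEV_NAME_MRVL_PMD>,max_nb_queue_pairs=2,\
 *	max_nb_sessions=1024,socket_id=0"
 *
 * These are the arguments advertised via RTE_PMD_REGISTER_PARAM_STRING() at
 * the end of this file and parsed by rte_cryptodev_vdev_parse_init_params()
 * above.
 */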

/**
 * Uninitialize the crypto device
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD,
		"Closing Marvell crypto device %s on numa socket %u\n",
		name, rte_socket_id());

	sam_deinit();

	return 0;
}

/**
 * Basic driver handlers for use in the constructor.
 */
static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
	.probe = cryptodev_mrvl_crypto_init,
	.remove = cryptodev_mrvl_crypto_uninit
};

static struct cryptodev_driver mrvl_crypto_drv;

/* Register the driver in constructor. */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
		cryptodev_driver_id);