1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Marvell International Ltd.
3  * Copyright(c) 2017 Semihalf.
4  * All rights reserved.
5  */
6
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_kvargs.h>
15
16 #include "rte_mrvl_pmd_private.h"
17
18 #define MRVL_MUSDK_DMA_MEMSIZE 41943040
19
20 #define MRVL_PMD_MAX_NB_SESS_ARG                ("max_nb_sessions")
21 #define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS        2048
22
23 static uint8_t cryptodev_driver_id;
24
25 struct mrvl_pmd_init_params {
26         struct rte_cryptodev_pmd_init_params common;
27         uint32_t max_nb_sessions;
28 };
29
30 const char *mrvl_pmd_valid_params[] = {
31         RTE_CRYPTODEV_PMD_NAME_ARG,
32         RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
33         RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
34         MRVL_PMD_MAX_NB_SESS_ARG
35 };
36
37 /**
38  * Flag whether a given crypto algorithm is supported by the PMD/MUSDK.
39  *
40  * The idea is to have Not Supported as the default value (0).
41  * This way we only need to define proper map sizes;
42  * non-initialized entries will be not supported by default.
43  */
44 enum algo_supported {
45         ALGO_NOT_SUPPORTED = 0,
46         ALGO_SUPPORTED = 1,
47 };
48
49 /** Map elements for cipher mapping.*/
50 struct cipher_params_mapping {
51         enum algo_supported  supported;   /**< On/Off switch */
52         enum sam_cipher_alg  cipher_alg;  /**< Cipher algorithm */
53         enum sam_cipher_mode cipher_mode; /**< Cipher mode */
54         unsigned int max_key_len;         /**< Maximum key length (in bytes)*/
55 }
56 /* We want to squeeze multiple map entries into a single cache line. */
57 __rte_aligned(32);
58
59 /** Map elements for auth mapping.*/
60 struct auth_params_mapping {
61         enum algo_supported supported;  /**< On/off switch */
62         enum sam_auth_alg   auth_alg;   /**< Auth algorithm */
63 }
64 /* We want to squeeze multiple map entries into a single cache line. */
65 __rte_aligned(32);
66
67 /**
68  * Map of supported cipher algorithms.
69  */
70 static const
71 struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
72         [RTE_CRYPTO_CIPHER_3DES_CBC] = {
73                 .supported = ALGO_SUPPORTED,
74                 .cipher_alg = SAM_CIPHER_3DES,
75                 .cipher_mode = SAM_CIPHER_CBC,
76                 .max_key_len = BITS2BYTES(192) },
77         [RTE_CRYPTO_CIPHER_3DES_CTR] = {
78                 .supported = ALGO_SUPPORTED,
79                 .cipher_alg = SAM_CIPHER_3DES,
80                 .cipher_mode = SAM_CIPHER_CTR,
81                 .max_key_len = BITS2BYTES(192) },
82         [RTE_CRYPTO_CIPHER_3DES_ECB] = {
83                 .supported = ALGO_SUPPORTED,
84                 .cipher_alg = SAM_CIPHER_3DES,
85                 .cipher_mode = SAM_CIPHER_ECB,
86                 .max_key_len = BITS2BYTES(192) },
87         [RTE_CRYPTO_CIPHER_AES_CBC] = {
88                 .supported = ALGO_SUPPORTED,
89                 .cipher_alg = SAM_CIPHER_AES,
90                 .cipher_mode = SAM_CIPHER_CBC,
91                 .max_key_len = BITS2BYTES(256) },
92         [RTE_CRYPTO_CIPHER_AES_CTR] = {
93                 .supported = ALGO_SUPPORTED,
94                 .cipher_alg = SAM_CIPHER_AES,
95                 .cipher_mode = SAM_CIPHER_CTR,
96                 .max_key_len = BITS2BYTES(256) },
97 };
98
99 /**
100  * Map of supported auth algorithms.
101  */
102 static const
103 struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
104         [RTE_CRYPTO_AUTH_MD5_HMAC] = {
105                 .supported = ALGO_SUPPORTED,
106                 .auth_alg = SAM_AUTH_HMAC_MD5 },
107         [RTE_CRYPTO_AUTH_MD5] = {
108                 .supported = ALGO_SUPPORTED,
109                 .auth_alg = SAM_AUTH_HASH_MD5 },
110         [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
111                 .supported = ALGO_SUPPORTED,
112                 .auth_alg = SAM_AUTH_HMAC_SHA1 },
113         [RTE_CRYPTO_AUTH_SHA1] = {
114                 .supported = ALGO_SUPPORTED,
115                 .auth_alg = SAM_AUTH_HASH_SHA1 },
116         [RTE_CRYPTO_AUTH_SHA224] = {
117                 .supported = ALGO_SUPPORTED,
118                 .auth_alg = SAM_AUTH_HASH_SHA2_224 },
119         [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
120                 .supported = ALGO_SUPPORTED,
121                 .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
122         [RTE_CRYPTO_AUTH_SHA256] = {
123                 .supported = ALGO_SUPPORTED,
124                 .auth_alg = SAM_AUTH_HASH_SHA2_256 },
125         [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
126                 .supported = ALGO_SUPPORTED,
127                 .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
128         [RTE_CRYPTO_AUTH_SHA384] = {
129                 .supported = ALGO_SUPPORTED,
130                 .auth_alg = SAM_AUTH_HASH_SHA2_384 },
131         [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
132                 .supported = ALGO_SUPPORTED,
133                 .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
134         [RTE_CRYPTO_AUTH_SHA512] = {
135                 .supported = ALGO_SUPPORTED,
136                 .auth_alg = SAM_AUTH_HASH_SHA2_512 },
137         [RTE_CRYPTO_AUTH_AES_GMAC] = {
138                 .supported = ALGO_SUPPORTED,
139                 .auth_alg = SAM_AUTH_AES_GMAC },
140 };
141
142 /**
143  * Map of supported aead algorithms.
144  */
145 static const
146 struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
147         [RTE_CRYPTO_AEAD_AES_GCM] = {
148                 .supported = ALGO_SUPPORTED,
149                 .cipher_alg = SAM_CIPHER_AES,
150                 .cipher_mode = SAM_CIPHER_GCM,
151                 .max_key_len = BITS2BYTES(256) },
152 };
153
154 /*
155  *-----------------------------------------------------------------------------
156  * Forward declarations.
157  *-----------------------------------------------------------------------------
158  */
159 static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
160
161 /*
162  *-----------------------------------------------------------------------------
163  * Session Preparation.
164  *-----------------------------------------------------------------------------
165  */
166
167 /**
168  * Get xform chain order.
169  *
170  * @param xform Pointer to configuration structure chain for crypto operations.
171  * @returns Order of crypto operations.
172  */
173 static enum mrvl_crypto_chain_order
174 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
175 {
176         /* Currently, Marvell supports at most 2 operations in a chain */
177         if (xform->next != NULL && xform->next->next != NULL)
178                 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
179
180         if (xform->next != NULL) {
181                 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
182                         (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
183                         return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
184
185                 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
186                         (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
187                         return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
188         } else {
189                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
190                         return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
191
192                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
193                         return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
194
195                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
196                         return MRVL_CRYPTO_CHAIN_COMBINED;
197         }
198         return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
199 }
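
/*
 * Editor's illustration (not part of the original driver): an application-side
 * transform chain that the helper above classifies as
 * MRVL_CRYPTO_CHAIN_CIPHER_AUTH. Only the fields relevant to chain ordering
 * are shown; everything else is assumed to be filled in elsewhere.
 *
 * @code
 *     struct rte_crypto_sym_xform auth_xf = {
 *             .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *             .next = NULL,
 *     };
 *     struct rte_crypto_sym_xform cipher_xf = {
 *             .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *             .next = &auth_xf,
 *     };
 *
 *     // mrvl_crypto_get_chain_order(&cipher_xf) returns
 *     // MRVL_CRYPTO_CHAIN_CIPHER_AUTH.
 * @endcode
 */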
200
201 /**
202  * Set session parameters for cipher part.
203  *
204  * @param sess Crypto session pointer.
205  * @param cipher_xform Pointer to configuration structure for cipher operations.
206  * @returns 0 in case of success, negative value otherwise.
207  */
208 static int
209 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
210                 const struct rte_crypto_sym_xform *cipher_xform)
211 {
212         /* Make sure we've got proper struct */
213         if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
214                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
215                 return -EINVAL;
216         }
217
218         /* See if map data is present and valid */
219         if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
220                 (cipher_map[cipher_xform->cipher.algo].supported
221                         != ALGO_SUPPORTED)) {
222                 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
223                 return -EINVAL;
224         }
225
226         sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
227
228         sess->sam_sess_params.dir =
229                 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
230                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
231         sess->sam_sess_params.cipher_alg =
232                 cipher_map[cipher_xform->cipher.algo].cipher_alg;
233         sess->sam_sess_params.cipher_mode =
234                 cipher_map[cipher_xform->cipher.algo].cipher_mode;
235
236         /* Assume IV will be passed together with data. */
237         sess->sam_sess_params.cipher_iv = NULL;
238
239         /* Check the key length against the supported maximum. */
240         if (cipher_xform->cipher.key.length >
241                 cipher_map[cipher_xform->cipher.algo].max_key_len) {
242                 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
243                 return -EINVAL;
244         }
245
246         sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
247         sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
248
249         return 0;
250 }
251
252 /**
253  * Set session parameters for authentication part.
254  *
255  * @param sess Crypto session pointer.
256  * @param auth_xform Pointer to configuration structure for auth operations.
257  * @returns 0 in case of success, negative value otherwise.
258  */
259 static int
260 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
261                 const struct rte_crypto_sym_xform *auth_xform)
262 {
263         /* Make sure we've got proper struct */
264         if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
265                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
266                 return -EINVAL;
267         }
268
269         /* See if map data is present and valid */
270         if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
271                 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
272                 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
273                 return -EINVAL;
274         }
275
276         sess->sam_sess_params.dir =
277                 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
278                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
279         sess->sam_sess_params.auth_alg =
280                 auth_map[auth_xform->auth.algo].auth_alg;
281         sess->sam_sess_params.u.basic.auth_icv_len =
282                 auth_xform->auth.digest_length;
283         /* auth_key must be NULL if auth algorithm does not use HMAC */
284         sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
285                                          auth_xform->auth.key.data : NULL;
286         sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
287
288         return 0;
289 }
290
291 /**
292  * Set session parameters for aead part.
293  *
294  * @param sess Crypto session pointer.
295  * @param aead_xform Pointer to configuration structure for aead operations.
296  * @returns 0 in case of success, negative value otherwise.
297  */
298 static int
299 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
300                 const struct rte_crypto_sym_xform *aead_xform)
301 {
302         /* Make sure we've got proper struct */
303         if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
304                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
305                 return -EINVAL;
306         }
307
308         /* See if map data is present and valid */
309         if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
310                 (aead_map[aead_xform->aead.algo].supported
311                         != ALGO_SUPPORTED)) {
312                 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
313                 return -EINVAL;
314         }
315
316         sess->sam_sess_params.dir =
317                 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
318                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
319         sess->sam_sess_params.cipher_alg =
320                 aead_map[aead_xform->aead.algo].cipher_alg;
321         sess->sam_sess_params.cipher_mode =
322                 aead_map[aead_xform->aead.algo].cipher_mode;
323
324         /* Assume IV will be passed together with data. */
325         sess->sam_sess_params.cipher_iv = NULL;
326
327         /* Check the key length against the supported maximum. */
328         if (aead_xform->aead.key.length >
329                 aead_map[aead_xform->aead.algo].max_key_len) {
330                 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
331                 return -EINVAL;
332         }
333
334         sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
335         sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
336
337         if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
338                 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
339
340         sess->sam_sess_params.u.basic.auth_icv_len =
341                 aead_xform->aead.digest_length;
342
343         sess->sam_sess_params.u.basic.auth_aad_len =
344                 aead_xform->aead.aad_length;
345
346         return 0;
347 }
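
/*
 * Editor's sketch (assumptions noted, not from the original source): an
 * AES-128-GCM AEAD transform of the shape this function accepts. The key
 * buffer and the IV_OFFSET placeholder are assumed to be defined by the
 * caller; the lengths are illustrative.
 *
 * @code
 *     static const uint8_t key[16] = { 0 };
 *
 *     struct rte_crypto_sym_xform aead_xf = {
 *             .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *             .aead = {
 *                     .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *                     .algo = RTE_CRYPTO_AEAD_AES_GCM,
 *                     .key = { .data = key, .length = sizeof(key) },
 *                     .iv = { .offset = IV_OFFSET, .length = 12 },
 *                     .digest_length = 16,
 *                     .aad_length = 8,
 *             },
 *     };
 * @endcode
 */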
348
349 /**
350  * Parse crypto transform chain and setup session parameters.
351  * Only the transform chain is inspected here; no device handle is needed.
352  *
353  * @param sess Pointer to crypto session.
354  * @param xform Pointer to configuration structure chain for crypto operations.
355  * @returns 0 in case of success, negative value otherwise.
356  */
357 int
358 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
359                 const struct rte_crypto_sym_xform *xform)
360 {
361         const struct rte_crypto_sym_xform *cipher_xform = NULL;
362         const struct rte_crypto_sym_xform *auth_xform = NULL;
363         const struct rte_crypto_sym_xform *aead_xform = NULL;
364
365         /* Filter out spurious/broken requests */
366         if (xform == NULL)
367                 return -EINVAL;
368
369         sess->chain_order = mrvl_crypto_get_chain_order(xform);
370         switch (sess->chain_order) {
371         case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
372                 cipher_xform = xform;
373                 auth_xform = xform->next;
374                 break;
375         case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
376                 auth_xform = xform;
377                 cipher_xform = xform->next;
378                 break;
379         case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
380                 cipher_xform = xform;
381                 break;
382         case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
383                 auth_xform = xform;
384                 break;
385         case MRVL_CRYPTO_CHAIN_COMBINED:
386                 aead_xform = xform;
387                 break;
388         default:
389                 return -EINVAL;
390         }
391
392         if ((cipher_xform != NULL) &&
393                 (mrvl_crypto_set_cipher_session_parameters(
394                         sess, cipher_xform) < 0)) {
395                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
396                 return -EINVAL;
397         }
398
399         if ((auth_xform != NULL) &&
400                 (mrvl_crypto_set_auth_session_parameters(
401                         sess, auth_xform) < 0)) {
402                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
403                 return -EINVAL;
404         }
405
406         if ((aead_xform != NULL) &&
407                 (mrvl_crypto_set_aead_session_parameters(
408                         sess, aead_xform) < 0)) {
409                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
410                 return -EINVAL;
411         }
412
413         return 0;
414 }
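
/*
 * Editor's note (illustrative only): this setter is reached through the
 * standard cryptodev session API; the mempools, dev_id and cipher_xf below
 * are assumed to be prepared by the application.
 *
 * @code
 *     struct rte_cryptodev_sym_session *s =
 *             rte_cryptodev_sym_session_create(sess_mp);
 *
 *     int rc = rte_cryptodev_sym_session_init(dev_id, s, &cipher_xf,
 *                     sess_priv_mp);
 * @endcode
 */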
415
416 /*
417  *-----------------------------------------------------------------------------
418  * Process Operations
419  *-----------------------------------------------------------------------------
420  */
421
422 /**
423  * Prepare a single request.
424  *
425  * This function translates a DPDK crypto request into one
426  * understood by MUSDK's SAM. If this is the first request in a session,
427  * it starts the session.
428  *
429  * @param request Pointer to a pre-allocated and zeroed request buffer [Out].
430  * @param src_bd Pointer to pre-allocated source descriptor [Out].
431  * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
432  * @param op Pointer to DPDK crypto operation struct [In].
433  */
434 static inline int
435 mrvl_request_prepare(struct sam_cio_op_params *request,
436                 struct sam_buf_info *src_bd,
437                 struct sam_buf_info *dst_bd,
438                 struct rte_crypto_op *op)
439 {
440         struct mrvl_crypto_session *sess;
441         struct rte_mbuf *dst_mbuf;
442         uint8_t *digest;
443
444         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
445                 MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
446                                 "oriented requests, op (%p) is sessionless.",
447                                 op);
448                 return -EINVAL;
449         }
450
451         sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
452                         op->sym->session, cryptodev_driver_id);
453         if (unlikely(sess == NULL)) {
454                 MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
455                 return -EINVAL;
456         }
457
458         /*
459          * If the application delivered a NULL dst buffer, it expects
460          * us to deliver the result in the src buffer.
461          */
462         dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
463
464         request->sa = sess->sam_sess;
465         request->cookie = op;
466
467         /* Single buffers only, sorry. */
468         request->num_bufs = 1;
469         request->src = src_bd;
470         src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
471         src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
472         src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
473
474         /* Empty source. */
475         if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
476                 /* EIP does not support 0 length buffers. */
477                 MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
478                 return -1;
479         }
480
481         /* Empty destination. */
482         if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
483                 /* Make dst buffer fit at least source data. */
484                 if (rte_pktmbuf_append(dst_mbuf,
485                         rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
486                         MRVL_CRYPTO_LOG_ERR("Unable to make the dst buffer big enough!");
487                         return -1;
488                 }
489         }
490
491         request->dst = dst_bd;
492         dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
493         dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
494
495         /*
496          * We can use all available space in dst_mbuf,
497          * not only what's used currently.
498          */
499         dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
500
501         if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
502                 request->cipher_len = op->sym->aead.data.length;
503                 request->cipher_offset = op->sym->aead.data.offset;
504                 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
505                         sess->cipher_iv_offset);
506
507                 request->auth_aad = op->sym->aead.aad.data;
508                 request->auth_offset = request->cipher_offset;
509                 request->auth_len = request->cipher_len;
510         } else {
511                 request->cipher_len = op->sym->cipher.data.length;
512                 request->cipher_offset = op->sym->cipher.data.offset;
513                 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
514                                 sess->cipher_iv_offset);
515
516                 request->auth_offset = op->sym->auth.data.offset;
517                 request->auth_len = op->sym->auth.data.length;
518         }
519
520         digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
521                 op->sym->aead.digest.data : op->sym->auth.digest.data;
522         if (digest == NULL) {
523                 /* No auth - no worry. */
524                 return 0;
525         }
526
527         request->auth_icv_offset = request->auth_offset + request->auth_len;
528
529         /*
530          * EIP supports only scenarios where ICV(digest buffer) is placed at
531          * auth_icv_offset. Any other placement means risking errors.
532          */
533         if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
534                 /*
535                  * This should be the most common case anyway,
536                  * EIP will overwrite DST buffer at auth_icv_offset.
537                  */
538                 if (rte_pktmbuf_mtod_offset(
539                                 dst_mbuf, uint8_t *,
540                                 request->auth_icv_offset) == digest) {
541                         return 0;
542                 }
543         } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
544                 /*
545                  * EIP will look for digest at auth_icv_offset
546                  * offset in SRC buffer.
547                  */
548                 if (rte_pktmbuf_mtod_offset(
549                                 op->sym->m_src, uint8_t *,
550                                 request->auth_icv_offset) == digest) {
551                         return 0;
552                 }
553         }
554
555         /*
556          * If we landed here, it means the digest pointer is at a
557          * different place than expected.
558          */
559         return -1;
560 }
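
/*
 * Editor's note on the digest checks above (illustrative, mbuf choice as in
 * the code): the application is expected to place the digest pointer right
 * after the authenticated region, i.e. something like
 *
 * @code
 *     op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf, uint8_t *,
 *             op->sym->auth.data.offset + op->sym->auth.data.length);
 * @endcode
 *
 * where mbuf is the destination buffer when encrypting (digest is generated)
 * and the source buffer when decrypting (digest is verified); AEAD requests
 * use op->sym->aead.digest.data and the aead offsets instead.
 */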
561
562 /*
563  *-----------------------------------------------------------------------------
564  * PMD Framework handlers
565  *-----------------------------------------------------------------------------
566  */
567
568 /**
569  * Enqueue burst.
570  *
571  * @param queue_pair Pointer to queue pair.
572  * @param ops Pointer to ops requests array.
573  * @param nb_ops Number of elements in ops requests array.
574  * @returns Number of elements consumed from ops.
575  */
576 static uint16_t
577 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
578                 uint16_t nb_ops)
579 {
580         uint16_t iter_ops = 0;
581         uint16_t to_enq = 0;
582         uint16_t consumed = 0;
583         int ret;
584         struct sam_cio_op_params requests[nb_ops];
585         /*
586          * DPDK uses single-fragment buffers, so we can keep the descriptors simple.
587          * SAM does not store bd pointers, so on-stack scope is enough.
588          */
589         struct sam_buf_info src_bd[nb_ops];
590         struct sam_buf_info dst_bd[nb_ops];
591         struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
592
593         if (nb_ops == 0)
594                 return 0;
595
596         /* Prepare the burst. */
597         memset(&requests, 0, sizeof(requests));
598
599         /* Iterate through the ops. */
600         for (; iter_ops < nb_ops; ++iter_ops) {
601                 if (mrvl_request_prepare(&requests[iter_ops],
602                                         &src_bd[iter_ops],
603                                         &dst_bd[iter_ops],
604                                         ops[iter_ops]) < 0) {
605                         MRVL_CRYPTO_LOG_ERR(
606                                 "Error while preparing parameters!");
607                         qp->stats.enqueue_err_count++;
608                         ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
609
610                         /*
611                          * The number of handled ops is increased
612                          * (even if handling resulted in an error).
613                          */
614                         ++consumed;
615                         break;
616                 }
617
618                 ops[iter_ops]->status =
619                         RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
620
621                 /* Increase the number of ops to enqueue. */
622                 ++to_enq;
623         } /* for (; iter_ops < nb_ops;... */
624
625         if (to_enq > 0) {
626                 /* Send the burst */
627                 ret = sam_cio_enq(qp->cio, requests, &to_enq);
628                 consumed += to_enq;
629                 if (ret < 0) {
630                         /*
631                          * Trust SAM that in this case the returned count will at
632                          * some point be correct (for now it is returned unmodified).
633                          */
634                         qp->stats.enqueue_err_count += to_enq;
635                         for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
636                                 ops[iter_ops]->status =
637                                         RTE_CRYPTO_OP_STATUS_ERROR;
638                 }
639         }
640
641         qp->stats.enqueued_count += to_enq;
642         return consumed;
643 }
644
645 /**
646  * Dequeue burst.
647  *
648  * @param queue_pair Pointer to queue pair.
649  * @param ops Pointer to ops requests array.
650  * @param nb_ops Number of elements in ops requests array.
651  * @returns Number of elements dequeued.
652  */
653 static uint16_t
654 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
655                 struct rte_crypto_op **ops,
656                 uint16_t nb_ops)
657 {
658         int ret;
659         struct mrvl_crypto_qp *qp = queue_pair;
660         struct sam_cio *cio = qp->cio;
661         struct sam_cio_op_result results[nb_ops];
662         uint16_t i;
663
664         ret = sam_cio_deq(cio, results, &nb_ops);
665         if (ret < 0) {
666                 /* Count all dequeued as error. */
667                 qp->stats.dequeue_err_count += nb_ops;
668
669                 /* But act as if they were dequeued anyway. */
670                 qp->stats.dequeued_count += nb_ops;
671
672                 return 0;
673         }
674
675         /* Unpack and check results. */
676         for (i = 0; i < nb_ops; ++i) {
677                 ops[i] = results[i].cookie;
678
679                 switch (results[i].status) {
680                 case SAM_CIO_OK:
681                         ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
682                         break;
683                 case SAM_CIO_ERR_ICV:
684                         MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
685                         ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
686                         break;
687                 default:
688                         MRVL_CRYPTO_LOG_DBG(
689                                 "CIO returned Error: %d", results[i].status);
690                         ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
691                         break;
692                 }
693         }
694
695         qp->stats.dequeued_count += nb_ops;
696         return nb_ops;
697 }
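
/*
 * Editor's sketch (not part of the driver): a minimal application loop that
 * exercises the burst handlers above through the cryptodev API. dev_id, qp_id
 * and the ops array are assumed to be prepared elsewhere.
 *
 * @code
 *     uint16_t enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *     uint16_t deq = 0;
 *
 *     while (deq < enq)
 *             deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                             ops + deq, enq - deq);
 * @endcode
 */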
698
699 /**
700  * Create a new crypto device.
701  *
702  * @param name Driver name.
703  * @param vdev Pointer to device structure.
704  * @param init_params Pointer to initialization parameters.
705  * @returns 0 in case of success, negative value otherwise.
706  */
707 static int
708 cryptodev_mrvl_crypto_create(const char *name,
709                 struct rte_vdev_device *vdev,
710                 struct mrvl_pmd_init_params *init_params)
711 {
712         struct rte_cryptodev *dev;
713         struct mrvl_crypto_private *internals;
714         struct sam_init_params  sam_params;
715         int ret;
716
717         dev = rte_cryptodev_pmd_create(name, &vdev->device,
718                         &init_params->common);
719         if (dev == NULL) {
720                 MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
721                 goto init_error;
722         }
723
724         dev->driver_id = cryptodev_driver_id;
725         dev->dev_ops = rte_mrvl_crypto_pmd_ops;
726
727         /* Register rx/tx burst functions for data path. */
728         dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
729         dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
730
731         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
732                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
733                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
734                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
735                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
736
737         /* Set up the private data with the configured limits. */
738         internals = dev->data->dev_private;
739
740         internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
741         internals->max_nb_sessions = init_params->max_nb_sessions;
742
743         /*
744          * ret == -EEXIST is fine; it means DMA
745          * has already been initialized.
746          */
747         ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
748         if (ret < 0) {
749                 if (ret != -EEXIST)
750                         return ret;
751
752                 MRVL_CRYPTO_LOG_INFO(
753                         "DMA memory has already been initialized by a different driver.");
754         }
755
756         sam_params.max_num_sessions = internals->max_nb_sessions;
757
758         return sam_init(&sam_params);
759
760 init_error:
761         MRVL_CRYPTO_LOG_ERR(
762                 "driver %s: %s failed", init_params->common.name, __func__);
763
764         cryptodev_mrvl_crypto_uninit(vdev);
765         return -EFAULT;
766 }
767
768 /** Parse integer from integer argument */
769 static int
770 parse_integer_arg(const char *key __rte_unused,
771                 const char *value, void *extra_args)
772 {
773         int *i = (int *) extra_args;
774
775         *i = atoi(value);
776         if (*i < 0) {
777                 MRVL_CRYPTO_LOG_ERR("Argument has to be non-negative.\n");
778                 return -EINVAL;
779         }
780
781         return 0;
782 }
783
784 /** Parse name */
785 static int
786 parse_name_arg(const char *key __rte_unused,
787                 const char *value, void *extra_args)
788 {
789         struct rte_cryptodev_pmd_init_params *params = extra_args;
790
791         if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
792                 MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
793                                 "%u bytes.\n", value,
794                                 RTE_CRYPTODEV_NAME_MAX_LEN - 1);
795                 return -EINVAL;
796         }
797
798         strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
799
800         return 0;
801 }
802
803 static int
804 mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
805                          const char *input_args)
806 {
807         struct rte_kvargs *kvlist = NULL;
808         int ret = 0;
809
810         if (params == NULL)
811                 return -EINVAL;
812
813         if (input_args) {
814                 kvlist = rte_kvargs_parse(input_args,
815                                           mrvl_pmd_valid_params);
816                 if (kvlist == NULL)
817                         return -1;
818
819                 /* Common VDEV parameters */
820                 ret = rte_kvargs_process(kvlist,
821                                          RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
822                                          &parse_integer_arg,
823                                          &params->common.max_nb_queue_pairs);
824                 if (ret < 0)
825                         goto free_kvlist;
826
827                 ret = rte_kvargs_process(kvlist,
828                                          RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
829                                          &parse_integer_arg,
830                                          &params->common.socket_id);
831                 if (ret < 0)
832                         goto free_kvlist;
833
834                 ret = rte_kvargs_process(kvlist,
835                                          RTE_CRYPTODEV_PMD_NAME_ARG,
836                                          &parse_name_arg,
837                                          &params->common);
838                 if (ret < 0)
839                         goto free_kvlist;
840
841                 ret = rte_kvargs_process(kvlist,
842                                          MRVL_PMD_MAX_NB_SESS_ARG,
843                                          &parse_integer_arg,
844                                          params);
845                 if (ret < 0)
846                         goto free_kvlist;
847
848         }
849
850 free_kvlist:
851         rte_kvargs_free(kvlist);
852         return ret;
853 }
854
855 /**
856  * Initialize the crypto device.
857  *
858  * @param vdev Pointer to device structure.
859  * @returns 0 in case of success, negative value otherwise.
860  */
861 static int
862 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
863 {
864         struct mrvl_pmd_init_params init_params = {
865                 .common = {
866                         .name = "",
867                         .private_data_size =
868                                 sizeof(struct mrvl_crypto_private),
869                         .max_nb_queue_pairs =
870                                 sam_get_num_inst() * sam_get_num_cios(0),
871                         .socket_id = rte_socket_id()
872                 },
873                 .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
874         };
875
876         const char *name, *args;
877         int ret;
878
879         name = rte_vdev_device_name(vdev);
880         if (name == NULL)
881                 return -EINVAL;
882         args = rte_vdev_device_args(vdev);
883
884         ret = mrvl_pmd_parse_input_args(&init_params, args);
885         if (ret) {
886                 RTE_LOG(ERR, PMD,
887                         "Failed to parse initialisation arguments [%s]\n",
888                         args);
889                 return -EINVAL;
890         }
891
892         return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
893 }
894
895 /**
896  * Uninitialize the crypto device
897  *
898  * @param vdev Pointer to device structure.
899  * @returns 0 in case of success, negative value otherwise.
900  */
901 static int
902 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
903 {
904         struct rte_cryptodev *cryptodev;
905         const char *name = rte_vdev_device_name(vdev);
906
907         if (name == NULL)
908                 return -EINVAL;
909
910         RTE_LOG(INFO, PMD,
911                 "Closing Marvell crypto device %s on numa socket %u\n",
912                 name, rte_socket_id());
913
914         sam_deinit();
915
916         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
917         if (cryptodev == NULL)
918                 return -ENODEV;
919
920         return rte_cryptodev_pmd_destroy(cryptodev);
921 }
922
923 /**
924  * Basic driver handlers for use in the constructor.
925  */
926 static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
927         .probe = cryptodev_mrvl_crypto_init,
928         .remove = cryptodev_mrvl_crypto_uninit
929 };
930
931 static struct cryptodev_driver mrvl_crypto_drv;
932
933 /* Register the driver in constructor. */
934 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
935 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
936         "max_nb_queue_pairs=<int> "
937         "max_nb_sessions=<int> "
938         "socket_id=<int>");
939 RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
940                 cryptodev_driver_id);
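
/*
 * Example only (the vdev name comes from CRYPTODEV_NAME_MRVL_PMD and the
 * values are arbitrary): the parameters registered above can be supplied on
 * the EAL command line, e.g.
 *
 *     --vdev="crypto_mvsam,max_nb_queue_pairs=2,max_nb_sessions=2048,socket_id=0"
 */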