crypto/mvsam: support scatter gather
[dpdk.git] drivers/crypto/mvsam/rte_mrvl_pmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Marvell International Ltd.
3  * Copyright(c) 2017 Semihalf.
4  * All rights reserved.
5  */
6
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_kvargs.h>
15
16 #include "rte_mrvl_pmd_private.h"
17
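/* Size of the DMA memory region requested from MUSDK at init time (40 MiB). */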
18 #define MRVL_MUSDK_DMA_MEMSIZE 41943040
19
20 #define MRVL_PMD_MAX_NB_SESS_ARG                ("max_nb_sessions")
21 #define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS        2048
22
23 static uint8_t cryptodev_driver_id;
24
25 struct mrvl_pmd_init_params {
26         struct rte_cryptodev_pmd_init_params common;
27         uint32_t max_nb_sessions;
28 };
29
30 const char *mrvl_pmd_valid_params[] = {
31         RTE_CRYPTODEV_PMD_NAME_ARG,
32         RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
33         RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
34         MRVL_PMD_MAX_NB_SESS_ARG
35 };
36
37 /**
38  * Flag telling whether a particular crypto algorithm is supported by PMD/MUSDK.
39  *
40  * The idea is to have the "not supported" value as the default (0).
41  * This way we only need to define proper map sizes; non-initialized
42  * entries will default to not supported.
43  */
44 enum algo_supported {
45         ALGO_NOT_SUPPORTED = 0,
46         ALGO_SUPPORTED = 1,
47 };
48
49 /** Map elements for cipher mapping. */
50 struct cipher_params_mapping {
51         enum algo_supported  supported;   /**< On/Off switch */
52         enum sam_cipher_alg  cipher_alg;  /**< Cipher algorithm */
53         enum sam_cipher_mode cipher_mode; /**< Cipher mode */
54         unsigned int max_key_len;         /**< Maximum key length (in bytes) */
55 }
56 /* We want to squeeze multiple map entries into a single cache line. */
57 __rte_aligned(32);
58
59 /** Map elements for auth mapping. */
60 struct auth_params_mapping {
61         enum algo_supported supported;  /**< On/off switch */
62         enum sam_auth_alg   auth_alg;   /**< Auth algorithm */
63 }
64 /* We want to squeeze multiple map entries into a single cache line. */
65 __rte_aligned(32);
66
67 /**
68  * Map of supported cipher algorithms.
69  */
70 static const
71 struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
72         [RTE_CRYPTO_CIPHER_NULL] = {
73                 .supported = ALGO_SUPPORTED,
74                 .cipher_alg = SAM_CIPHER_NONE },
75         [RTE_CRYPTO_CIPHER_3DES_CBC] = {
76                 .supported = ALGO_SUPPORTED,
77                 .cipher_alg = SAM_CIPHER_3DES,
78                 .cipher_mode = SAM_CIPHER_CBC,
79                 .max_key_len = BITS2BYTES(192) },
80         [RTE_CRYPTO_CIPHER_3DES_CTR] = {
81                 .supported = ALGO_SUPPORTED,
82                 .cipher_alg = SAM_CIPHER_3DES,
83                 .cipher_mode = SAM_CIPHER_CTR,
84                 .max_key_len = BITS2BYTES(192) },
85         [RTE_CRYPTO_CIPHER_3DES_ECB] = {
86                 .supported = ALGO_SUPPORTED,
87                 .cipher_alg = SAM_CIPHER_3DES,
88                 .cipher_mode = SAM_CIPHER_ECB,
89                 .max_key_len = BITS2BYTES(192) },
90         [RTE_CRYPTO_CIPHER_AES_CBC] = {
91                 .supported = ALGO_SUPPORTED,
92                 .cipher_alg = SAM_CIPHER_AES,
93                 .cipher_mode = SAM_CIPHER_CBC,
94                 .max_key_len = BITS2BYTES(256) },
95         [RTE_CRYPTO_CIPHER_AES_CTR] = {
96                 .supported = ALGO_SUPPORTED,
97                 .cipher_alg = SAM_CIPHER_AES,
98                 .cipher_mode = SAM_CIPHER_CTR,
99                 .max_key_len = BITS2BYTES(256) },
100         [RTE_CRYPTO_CIPHER_AES_ECB] = {
101                 .supported = ALGO_SUPPORTED,
102                 .cipher_alg = SAM_CIPHER_AES,
103                 .cipher_mode = SAM_CIPHER_ECB,
104                 .max_key_len = BITS2BYTES(256) },
105 };
106
107 /**
108  * Map of supported auth algorithms.
109  */
110 static const
111 struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
112         [RTE_CRYPTO_AUTH_NULL] = {
113                 .supported = ALGO_SUPPORTED,
114                 .auth_alg = SAM_AUTH_NONE },
115         [RTE_CRYPTO_AUTH_MD5_HMAC] = {
116                 .supported = ALGO_SUPPORTED,
117                 .auth_alg = SAM_AUTH_HMAC_MD5 },
118         [RTE_CRYPTO_AUTH_MD5] = {
119                 .supported = ALGO_SUPPORTED,
120                 .auth_alg = SAM_AUTH_HASH_MD5 },
121         [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
122                 .supported = ALGO_SUPPORTED,
123                 .auth_alg = SAM_AUTH_HMAC_SHA1 },
124         [RTE_CRYPTO_AUTH_SHA1] = {
125                 .supported = ALGO_SUPPORTED,
126                 .auth_alg = SAM_AUTH_HASH_SHA1 },
127         [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
128                 .supported = ALGO_SUPPORTED,
129                 .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
130         [RTE_CRYPTO_AUTH_SHA224] = {
131                 .supported = ALGO_SUPPORTED,
132                 .auth_alg = SAM_AUTH_HASH_SHA2_224 },
133         [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
134                 .supported = ALGO_SUPPORTED,
135                 .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
136         [RTE_CRYPTO_AUTH_SHA256] = {
137                 .supported = ALGO_SUPPORTED,
138                 .auth_alg = SAM_AUTH_HASH_SHA2_256 },
139         [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
140                 .supported = ALGO_SUPPORTED,
141                 .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
142         [RTE_CRYPTO_AUTH_SHA384] = {
143                 .supported = ALGO_SUPPORTED,
144                 .auth_alg = SAM_AUTH_HASH_SHA2_384 },
145         [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
146                 .supported = ALGO_SUPPORTED,
147                 .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
148         [RTE_CRYPTO_AUTH_SHA512] = {
149                 .supported = ALGO_SUPPORTED,
150                 .auth_alg = SAM_AUTH_HASH_SHA2_512 },
151         [RTE_CRYPTO_AUTH_AES_GMAC] = {
152                 .supported = ALGO_SUPPORTED,
153                 .auth_alg = SAM_AUTH_AES_GMAC },
154 };
155
156 /**
157  * Map of supported aead algorithms.
158  */
159 static const
160 struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
161         [RTE_CRYPTO_AEAD_AES_GCM] = {
162                 .supported = ALGO_SUPPORTED,
163                 .cipher_alg = SAM_CIPHER_AES,
164                 .cipher_mode = SAM_CIPHER_GCM,
165                 .max_key_len = BITS2BYTES(256) },
166 };
167
168 /*
169  *-----------------------------------------------------------------------------
170  * Forward declarations.
171  *-----------------------------------------------------------------------------
172  */
173 static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
174
175 /*
176  *-----------------------------------------------------------------------------
177  * Session Preparation.
178  *-----------------------------------------------------------------------------
179  */
180
181 /**
182  * Get xform chain order.
183  *
184  * @param xform Pointer to configuration structure chain for crypto operations.
185  * @returns Order of crypto operations.
186  */
187 static enum mrvl_crypto_chain_order
188 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
189 {
190         /* Currently, Marvell supports at most 2 operations in a chain */
191         if (xform->next != NULL && xform->next->next != NULL)
192                 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
193
194         if (xform->next != NULL) {
195                 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
196                         (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
197                         return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
198
199                 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
200                         (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
201                         return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
202         } else {
203                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
204                         return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
205
206                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
207                         return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
208
209                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
210                         return MRVL_CRYPTO_CHAIN_COMBINED;
211         }
212         return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
213 }
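/*
 * Illustrative sketch only (not used by the driver): how an application-built
 * transform chain maps onto the orders above. The algorithm choices and the
 * digest/IV lengths below are assumptions picked for the example.
 */
static __rte_unused enum mrvl_crypto_chain_order
mrvl_crypto_chain_order_example(void)
{
        /* Second transform in the chain: SHA1-HMAC digest generation. */
        struct rte_crypto_sym_xform auth_xform = {
                .type = RTE_CRYPTO_SYM_XFORM_AUTH,
                .auth = {
                        .op = RTE_CRYPTO_AUTH_OP_GENERATE,
                        .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                        .digest_length = 20,
                },
        };
        /* First transform in the chain: AES-CBC encryption. */
        struct rte_crypto_sym_xform cipher_xform = {
                .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
                .next = &auth_xform,
                .cipher = {
                        .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                        .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                        .iv = { .offset = 0, .length = 16 },
                },
        };

        /* Cipher followed by auth resolves to MRVL_CRYPTO_CHAIN_CIPHER_AUTH. */
        return mrvl_crypto_get_chain_order(&cipher_xform);
}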
214
215 /**
216  * Set session parameters for cipher part.
217  *
218  * @param sess Crypto session pointer.
219  * @param cipher_xform Pointer to configuration structure for cipher operations.
220  * @returns 0 in case of success, negative value otherwise.
221  */
222 static int
223 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
224                 const struct rte_crypto_sym_xform *cipher_xform)
225 {
226         /* Make sure we've got proper struct */
227         if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
228                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
229                 return -EINVAL;
230         }
231
232         /* See if map data is present and valid */
233         if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
234                 (cipher_map[cipher_xform->cipher.algo].supported
235                         != ALGO_SUPPORTED)) {
236                 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
237                 return -EINVAL;
238         }
239
240         sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
241
242         sess->sam_sess_params.dir =
243                 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
244                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
245         sess->sam_sess_params.cipher_alg =
246                 cipher_map[cipher_xform->cipher.algo].cipher_alg;
247         sess->sam_sess_params.cipher_mode =
248                 cipher_map[cipher_xform->cipher.algo].cipher_mode;
249
250         /* Assume IV will be passed together with data. */
251         sess->sam_sess_params.cipher_iv = NULL;
252
253         /* Check the key length against the map's maximum. */
254         if (cipher_xform->cipher.key.length >
255                 cipher_map[cipher_xform->cipher.algo].max_key_len) {
256                 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
257                 return -EINVAL;
258         }
259
260         sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
261         sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
262
263         return 0;
264 }
265
266 /**
267  * Set session parameters for authentication part.
268  *
269  * @param sess Crypto session pointer.
270  * @param auth_xform Pointer to configuration structure for auth operations.
271  * @returns 0 in case of success, negative value otherwise.
272  */
273 static int
274 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
275                 const struct rte_crypto_sym_xform *auth_xform)
276 {
277         /* Make sure we've got proper struct */
278         if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
279                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
280                 return -EINVAL;
281         }
282
283         /* See if map data is present and valid */
284         if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
285                 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
286                 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
287                 return -EINVAL;
288         }
289
290         sess->sam_sess_params.dir =
291                 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
292                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
293         sess->sam_sess_params.auth_alg =
294                 auth_map[auth_xform->auth.algo].auth_alg;
295         sess->sam_sess_params.u.basic.auth_icv_len =
296                 auth_xform->auth.digest_length;
297         /* auth_key must be NULL if auth algorithm does not use HMAC */
298         sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
299                                          auth_xform->auth.key.data : NULL;
300         sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
301
302         return 0;
303 }
304
305 /**
306  * Set session parameters for aead part.
307  *
308  * @param sess Crypto session pointer.
309  * @param aead_xform Pointer to configuration structure for aead operations.
310  * @returns 0 in case of success, negative value otherwise.
311  */
312 static int
313 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
314                 const struct rte_crypto_sym_xform *aead_xform)
315 {
316         /* Make sure we've got proper struct */
317         if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
318                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
319                 return -EINVAL;
320         }
321
322         /* See if map data is present and valid */
323         if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
324                 (aead_map[aead_xform->aead.algo].supported
325                         != ALGO_SUPPORTED)) {
326                 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
327                 return -EINVAL;
328         }
329
330         sess->sam_sess_params.dir =
331                 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
332                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
333         sess->sam_sess_params.cipher_alg =
334                 aead_map[aead_xform->aead.algo].cipher_alg;
335         sess->sam_sess_params.cipher_mode =
336                 aead_map[aead_xform->aead.algo].cipher_mode;
337
338         /* Assume IV will be passed together with data. */
339         sess->sam_sess_params.cipher_iv = NULL;
340
341         /* Check the key length against the map's maximum. */
342         if (aead_xform->aead.key.length >
343                 aead_map[aead_xform->aead.algo].max_key_len) {
344                 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
345                 return -EINVAL;
346         }
347
348         sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
349         sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
350
351         if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
352                 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
353
354         sess->sam_sess_params.u.basic.auth_icv_len =
355                 aead_xform->aead.digest_length;
356
357         sess->sam_sess_params.u.basic.auth_aad_len =
358                 aead_xform->aead.aad_length;
359
360         return 0;
361 }
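/*
 * Illustrative sketch only (not used by the driver): an AES-128-GCM transform
 * accepted by the function above. The key/IV/digest/AAD lengths are
 * assumptions picked for the example.
 */
static __rte_unused int
mrvl_crypto_aead_session_example(struct mrvl_crypto_session *sess,
                uint8_t *key)
{
        struct rte_crypto_sym_xform aead_xform = {
                .type = RTE_CRYPTO_SYM_XFORM_AEAD,
                .aead = {
                        .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
                        .algo = RTE_CRYPTO_AEAD_AES_GCM,
                        .key = { .data = key, .length = 16 },
                        .iv = { .offset = 0, .length = 12 },
                        .digest_length = 16,
                        .aad_length = 8,
                },
        };

        /* Fills sess->sam_sess_params for the MRVL_CRYPTO_CHAIN_COMBINED case. */
        return mrvl_crypto_set_aead_session_parameters(sess, &aead_xform);
}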
362
363 /**
364  * Parse crypto transform chain and setup session parameters.
365  *
366  * @param sess Pointer to crypto session.
367  * @param xform Pointer to configuration structure chain
368  *   for crypto operations.
369  * @returns 0 in case of success, negative value otherwise.
370  */
371 int
372 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
373                 const struct rte_crypto_sym_xform *xform)
374 {
375         const struct rte_crypto_sym_xform *cipher_xform = NULL;
376         const struct rte_crypto_sym_xform *auth_xform = NULL;
377         const struct rte_crypto_sym_xform *aead_xform = NULL;
378
379         /* Filter out spurious/broken requests */
380         if (xform == NULL)
381                 return -EINVAL;
382
383         sess->chain_order = mrvl_crypto_get_chain_order(xform);
384         switch (sess->chain_order) {
385         case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
386                 cipher_xform = xform;
387                 auth_xform = xform->next;
388                 break;
389         case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
390                 auth_xform = xform;
391                 cipher_xform = xform->next;
392                 break;
393         case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
394                 cipher_xform = xform;
395                 break;
396         case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
397                 auth_xform = xform;
398                 break;
399         case MRVL_CRYPTO_CHAIN_COMBINED:
400                 aead_xform = xform;
401                 break;
402         default:
403                 return -EINVAL;
404         }
405
406         if ((cipher_xform != NULL) &&
407                 (mrvl_crypto_set_cipher_session_parameters(
408                         sess, cipher_xform) < 0)) {
409                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
410                 return -EINVAL;
411         }
412
413         if ((auth_xform != NULL) &&
414                 (mrvl_crypto_set_auth_session_parameters(
415                         sess, auth_xform) < 0)) {
416                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
417                 return -EINVAL;
418         }
419
420         if ((aead_xform != NULL) &&
421                 (mrvl_crypto_set_aead_session_parameters(
422                         sess, aead_xform) < 0)) {
423                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
424                 return -EINVAL;
425         }
426
427         return 0;
428 }
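/*
 * Application-side sketch (assumptions: dev_id, the session mempool and the
 * transform chain are prepared by the caller). It shows how the parser above
 * is typically reached: rte_cryptodev_sym_session_init() invokes the PMD's
 * session-configure op, which ends up calling
 * mrvl_crypto_set_session_parameters().
 */
static __rte_unused struct rte_cryptodev_sym_session *
mrvl_crypto_session_create_example(uint8_t dev_id, struct rte_mempool *sess_mp,
                struct rte_crypto_sym_xform *xform)
{
        struct rte_cryptodev_sym_session *session;

        /* Allocate an uninitialized session from the mempool. */
        session = rte_cryptodev_sym_session_create(sess_mp);
        if (session == NULL)
                return NULL;

        /* Bind the session to this device; the PMD parses the xform chain. */
        if (rte_cryptodev_sym_session_init(dev_id, session, xform,
                        sess_mp) < 0) {
                rte_cryptodev_sym_session_free(session);
                return NULL;
        }

        return session;
}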
429
430 /*
431  *-----------------------------------------------------------------------------
432  * Process Operations
433  *-----------------------------------------------------------------------------
434  */
435
436 /**
437  * Prepare a single request.
438  *
439  * This function basically translates a DPDK crypto request into one
440  * understandable by MUSDK's SAM. If this is the first request in a
441  * session, it starts the session.
442  *
443  * @param request Pointer to a pre-allocated and reset request buffer [Out].
444  * @param src_bd Pointer to pre-allocated source descriptor [Out].
445  * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
446  * @param op Pointer to DPDK crypto operation struct [In].
447  */
448 static inline int
449 mrvl_request_prepare(struct sam_cio_op_params *request,
450                 struct sam_buf_info *src_bd,
451                 struct sam_buf_info *dst_bd,
452                 struct rte_crypto_op *op)
453 {
454         struct mrvl_crypto_session *sess;
455         struct rte_mbuf *src_mbuf, *dst_mbuf;
456         uint16_t segments_nb;
457         uint8_t *digest;
458         int i;
459
460         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
461                 MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
462                                 "oriented requests, op (%p) is sessionless.",
463                                 op);
464                 return -EINVAL;
465         }
466
467         sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
468                         op->sym->session, cryptodev_driver_id);
469         if (unlikely(sess == NULL)) {
470                 MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
471                 return -EINVAL;
472         }
473
474         request->sa = sess->sam_sess;
475         request->cookie = op;
476
477         src_mbuf = op->sym->m_src;
478         segments_nb = src_mbuf->nb_segs;
479         /* The following conditions must be met:
480          * - a destination buffer is required when the source buffer is segmented
481          * - a segmented destination buffer is not supported
482          */
483         if ((segments_nb > 1) && (!op->sym->m_dst)) {
484                 MRVL_CRYPTO_LOG_ERR("op->sym->m_dst = NULL!\n");
485                 return -1;
486         }
487         /* For the non-SG case:
488          * if the application passed a NULL dst buffer, it expects
489          * us to deliver the result in the src buffer.
490          */
491         dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
492
493         if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
494                 MRVL_CRYPTO_LOG_ERR("Segmented destination buffer "
495                                     "not supported.\n");
496                 return -1;
497         }
498
499         request->num_bufs = segments_nb;
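        /*
         * Scatter-gather input: every segment of the source mbuf chain is
         * described by its own sam_buf_info entry in src_bd[].
         */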
500         for (i = 0; i < segments_nb; i++) {
501                 /* Empty source. */
502                 if (rte_pktmbuf_data_len(src_mbuf) == 0) {
503                         /* EIP does not support 0 length buffers. */
504                         MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
505                         return -1;
506                 }
507                 src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
508                 src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
509                 src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);
510
511                 src_mbuf = src_mbuf->next;
512         }
513         request->src = src_bd;
514
515         /* Empty destination. */
516         if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
517                 /* Make the dst buffer large enough to hold the source data. */
518                 if (rte_pktmbuf_append(dst_mbuf,
519                         rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
520                         MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
521                         return -1;
522                 }
523         }
524
525         request->dst = dst_bd;
526         dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
527         dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
528
529         /*
530          * We can use all available space in dst_mbuf,
531          * not only what's used currently.
532          */
533         dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
534
535         if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
536                 request->cipher_len = op->sym->aead.data.length;
537                 request->cipher_offset = op->sym->aead.data.offset;
538                 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
539                         sess->cipher_iv_offset);
540
541                 request->auth_aad = op->sym->aead.aad.data;
542                 request->auth_offset = request->cipher_offset;
543                 request->auth_len = request->cipher_len;
544         } else {
545                 request->cipher_len = op->sym->cipher.data.length;
546                 request->cipher_offset = op->sym->cipher.data.offset;
547                 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
548                                 sess->cipher_iv_offset);
549
550                 request->auth_offset = op->sym->auth.data.offset;
551                 request->auth_len = op->sym->auth.data.length;
552         }
553
554         digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
555                 op->sym->aead.digest.data : op->sym->auth.digest.data;
556         if (digest == NULL) {
557                 /* No auth - no worry. */
558                 return 0;
559         }
560
561         request->auth_icv_offset = request->auth_offset + request->auth_len;
562
563         /*
564          * EIP supports only scenarios where ICV(digest buffer) is placed at
565          * auth_icv_offset.
566          */
567         if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
568                 /*
569                  * This should be the most common case anyway,
570                  * EIP will overwrite DST buffer at auth_icv_offset.
571                  */
572                 if (rte_pktmbuf_mtod_offset(
573                                 dst_mbuf, uint8_t *,
574                                 request->auth_icv_offset) == digest)
575                         return 0;
576         } else { /* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
577                 /*
578                  * EIP will look for the digest at auth_icv_offset
579                  * in the SRC buffer. The digest must be placed in the
580                  * last segment, with the offset adjusted so that it is
581                  * reachable within that segment.
582                  */
583                 struct rte_mbuf *last_seg = op->sym->m_src;
584                 uint32_t d_offset = request->auth_icv_offset;
585                 u32 d_size = sess->sam_sess_params.u.basic.auth_icv_len;
586                 unsigned char *d_ptr;
587
588                 /* Find the last segment and the offset for the last segment */
589                 while ((last_seg->next != NULL) &&
590                                 (d_offset >= last_seg->data_len)) {
591                         d_offset -= last_seg->data_len;
592                         last_seg = last_seg->next;
593                 }
594
595                 if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
596                                             d_offset) == digest)
597                         return 0;
598
599                 /* copy digest to last segment */
600                 if (last_seg->buf_len >= (d_size + d_offset)) {
601                         d_ptr = (unsigned char *)last_seg->buf_addr +
602                                  d_offset;
603                         rte_memcpy(d_ptr, digest, d_size);
604                         return 0;
605                 }
606         }
607
608         /*
609          * If we landed here it means the digest pointer is
610          * at a different place than expected.
611          */
612         return -1;
613 }
614
615 /*
616  *-----------------------------------------------------------------------------
617  * PMD Framework handlers
618  *-----------------------------------------------------------------------------
619  */
620
621 /**
622  * Enqueue burst.
623  *
624  * @param queue_pair Pointer to queue pair.
625  * @param ops Pointer to ops requests array.
626  * @param nb_ops Number of elements in ops requests array.
627  * @returns Number of elements consumed from ops.
628  */
629 static uint16_t
630 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
631                 uint16_t nb_ops)
632 {
633         uint16_t iter_ops = 0;
634         uint16_t to_enq = 0;
635         uint16_t consumed = 0;
636         int ret;
637         struct sam_cio_op_params requests[nb_ops];
638         /*
639          * SAM does not store bd pointers, so on-stack scope will be enough.
640          */
641         struct mrvl_crypto_src_table src_bd[nb_ops];
642         struct sam_buf_info          dst_bd[nb_ops];
643         struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
644
645         if (nb_ops == 0)
646                 return 0;
647
648         /* Prepare the burst. */
649         memset(&requests, 0, sizeof(requests));
650         memset(&src_bd, 0, sizeof(src_bd));
651
652         /* Iterate through */
653         for (; iter_ops < nb_ops; ++iter_ops) {
654                 /* store the op id for debug */
655                 src_bd[iter_ops].iter_ops = iter_ops;
656                 if (mrvl_request_prepare(&requests[iter_ops],
657                                         src_bd[iter_ops].src_bd,
658                                         &dst_bd[iter_ops],
659                                         ops[iter_ops]) < 0) {
660                         MRVL_CRYPTO_LOG_ERR(
661                                 "Error while preparing request parameters!");
662                         qp->stats.enqueue_err_count++;
663                         ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
664
665                         /*
666                          * The number of handled ops is increased
667                          * (even if the result of handling is an error).
668                          */
669                         ++consumed;
670                         break;
671                 }
672
673                 ops[iter_ops]->status =
674                         RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
675
676                 /* Increase the number of ops to enqueue. */
677                 ++to_enq;
678         } /* for (; iter_ops < nb_ops;... */
679
680         if (to_enq > 0) {
681                 /* Send the burst */
682                 ret = sam_cio_enq(qp->cio, requests, &to_enq);
683                 consumed += to_enq;
684                 if (ret < 0) {
685                         /*
686                          * Trust SAM that in this case the returned value will at
687                          * some point be correct (for now it is returned unmodified).
688                          */
689                         qp->stats.enqueue_err_count += to_enq;
690                         for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
691                                 ops[iter_ops]->status =
692                                         RTE_CRYPTO_OP_STATUS_ERROR;
693                 }
694         }
695
696         qp->stats.enqueued_count += to_enq;
697         return consumed;
698 }
699
700 /**
701  * Dequeue burst.
702  *
703  * @param queue_pair Pointer to queue pair.
704  * @param ops Pointer to ops requests array.
705  * @param nb_ops Number of elements in ops requests array.
706  * @returns Number of elements dequeued.
707  */
708 static uint16_t
709 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
710                 struct rte_crypto_op **ops,
711                 uint16_t nb_ops)
712 {
713         int ret;
714         struct mrvl_crypto_qp *qp = queue_pair;
715         struct sam_cio *cio = qp->cio;
716         struct sam_cio_op_result results[nb_ops];
717         uint16_t i;
718
719         ret = sam_cio_deq(cio, results, &nb_ops);
720         if (ret < 0) {
721                 /* Count all dequeued as error. */
722                 qp->stats.dequeue_err_count += nb_ops;
723
724                 /* But act as if they were dequeued anyway. */
725                 qp->stats.dequeued_count += nb_ops;
726
727                 return 0;
728         }
729
730         /* Unpack and check results. */
731         for (i = 0; i < nb_ops; ++i) {
732                 ops[i] = results[i].cookie;
733
734                 switch (results[i].status) {
735                 case SAM_CIO_OK:
736                         ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
737                         break;
738                 case SAM_CIO_ERR_ICV:
739                         MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
740                         ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
741                         break;
742                 default:
743                         MRVL_CRYPTO_LOG_DBG(
744                                 "CIO returned Error: %d", results[i].status);
745                         ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
746                         break;
747                 }
748         }
749
750         qp->stats.dequeued_count += nb_ops;
751         return nb_ops;
752 }
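/*
 * Application-side usage sketch (assumptions: dev_id, qp_id and an array of
 * prepared crypto ops come from the caller). The generic burst calls below
 * end up in mrvl_crypto_pmd_enqueue_burst()/mrvl_crypto_pmd_dequeue_burst(),
 * which are registered in cryptodev_mrvl_crypto_create() further down.
 */
static __rte_unused void
mrvl_crypto_burst_example(uint8_t dev_id, uint16_t qp_id,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rte_crypto_op *deq_ops[nb_ops];
        uint16_t enqueued, dequeued = 0;

        /* Hand the prepared operations to the SAM crypto I/O queue. */
        enqueued = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

        /* Poll until every successfully enqueued operation has completed. */
        while (dequeued < enqueued)
                dequeued += rte_cryptodev_dequeue_burst(dev_id, qp_id,
                                deq_ops + dequeued, enqueued - dequeued);
}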
753
754 /**
755  * Create a new crypto device.
756  *
757  * @param name Driver name.
758  * @param vdev Pointer to device structure.
759  * @param init_params Pointer to initialization parameters.
760  * @returns 0 in case of success, negative value otherwise.
761  */
762 static int
763 cryptodev_mrvl_crypto_create(const char *name,
764                 struct rte_vdev_device *vdev,
765                 struct mrvl_pmd_init_params *init_params)
766 {
767         struct rte_cryptodev *dev;
768         struct mrvl_crypto_private *internals;
769         struct sam_init_params  sam_params;
770         int ret;
771
772         dev = rte_cryptodev_pmd_create(name, &vdev->device,
773                         &init_params->common);
774         if (dev == NULL) {
775                 MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
776                 goto init_error;
777         }
778
779         dev->driver_id = cryptodev_driver_id;
780         dev->dev_ops = rte_mrvl_crypto_pmd_ops;
781
782         /* Register enqueue/dequeue burst functions for the data path. */
783         dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
784         dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
785
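        /*
         * Scatter-gather support is input-only: a segmented (SGL) source is
         * accepted for out-of-place operations, while the destination must
         * always be a linear (LB) buffer - see mrvl_request_prepare().
         */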
786         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
787                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
788                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
789                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
790                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
791
792         /* Store configuration limits in the driver's private data. */
793         internals = dev->data->dev_private;
794
795         internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
796         internals->max_nb_sessions = init_params->max_nb_sessions;
797
798         /*
799          * ret == -EEXIST is correct; it means the DMA memory
800          * has already been initialized.
801          */
802         ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
803         if (ret < 0) {
804                 if (ret != -EEXIST)
805                         return ret;
806
807                 MRVL_CRYPTO_LOG_INFO(
808                         "DMA memory has already been initialized by a different driver.");
809         }
810
811         sam_params.max_num_sessions = internals->max_nb_sessions;
812
813         /* sam_set_debug_flags(3); */
814         return sam_init(&sam_params);
815
816 init_error:
817         MRVL_CRYPTO_LOG_ERR(
818                 "driver %s: %s failed", init_params->common.name, __func__);
819
820         cryptodev_mrvl_crypto_uninit(vdev);
821         return -EFAULT;
822 }
823
824 /** Parse an integer argument. */
825 static int
826 parse_integer_arg(const char *key __rte_unused,
827                 const char *value, void *extra_args)
828 {
829         int *i = (int *) extra_args;
830
831         *i = atoi(value);
832         if (*i < 0) {
833                 MRVL_CRYPTO_LOG_ERR("Argument has to be non-negative.\n");
834                 return -EINVAL;
835         }
836
837         return 0;
838 }
839
840 /** Parse name */
841 static int
842 parse_name_arg(const char *key __rte_unused,
843                 const char *value, void *extra_args)
844 {
845         struct rte_cryptodev_pmd_init_params *params = extra_args;
846
847         if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
848                 MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
849                                 "%u bytes.\n", value,
850                                 RTE_CRYPTODEV_NAME_MAX_LEN - 1);
851                 return -EINVAL;
852         }
853
854         strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
855
856         return 0;
857 }
858
859 static int
860 mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
861                          const char *input_args)
862 {
863         struct rte_kvargs *kvlist = NULL;
864         int ret = 0;
865
866         if (params == NULL)
867                 return -EINVAL;
868
869         if (input_args) {
870                 kvlist = rte_kvargs_parse(input_args,
871                                           mrvl_pmd_valid_params);
872                 if (kvlist == NULL)
873                         return -1;
874
875                 /* Common VDEV parameters */
876                 ret = rte_kvargs_process(kvlist,
877                                          RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
878                                          &parse_integer_arg,
879                                          &params->common.max_nb_queue_pairs);
880                 if (ret < 0)
881                         goto free_kvlist;
882
883                 ret = rte_kvargs_process(kvlist,
884                                          RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
885                                          &parse_integer_arg,
886                                          &params->common.socket_id);
887                 if (ret < 0)
888                         goto free_kvlist;
889
890                 ret = rte_kvargs_process(kvlist,
891                                          RTE_CRYPTODEV_PMD_NAME_ARG,
892                                          &parse_name_arg,
893                                          &params->common);
894                 if (ret < 0)
895                         goto free_kvlist;
896
897                 ret = rte_kvargs_process(kvlist,
898                                          MRVL_PMD_MAX_NB_SESS_ARG,
899                                          &parse_integer_arg,
900                                          params);
901                 if (ret < 0)
902                         goto free_kvlist;
903
904         }
905
906 free_kvlist:
907         rte_kvargs_free(kvlist);
908         return ret;
909 }
910
911 /**
912  * Initialize the crypto device.
913  *
914  * @param vdev Pointer to device structure.
915  * @returns 0 in case of success, negative value otherwise.
916  */
917 static int
918 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
919 {
920         struct mrvl_pmd_init_params init_params = {
921                 .common = {
922                         .name = "",
923                         .private_data_size =
924                                 sizeof(struct mrvl_crypto_private),
925                         .max_nb_queue_pairs =
926                                 sam_get_num_inst() * sam_get_num_cios(0),
927                         .socket_id = rte_socket_id()
928                 },
929                 .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
930         };
931
932         const char *name, *args;
933         int ret;
934
935         name = rte_vdev_device_name(vdev);
936         if (name == NULL)
937                 return -EINVAL;
938         args = rte_vdev_device_args(vdev);
939
940         ret = mrvl_pmd_parse_input_args(&init_params, args);
941         if (ret) {
942                 RTE_LOG(ERR, PMD,
943                         "Failed to parse initialisation arguments [%s]\n",
944                         args);
945                 return -EINVAL;
946         }
947
948         return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
949 }
950
951 /**
952  * Uninitialize the crypto device.
953  *
954  * @param vdev Pointer to device structure.
955  * @returns 0 in case of success, negative value otherwise.
956  */
957 static int
958 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
959 {
960         struct rte_cryptodev *cryptodev;
961         const char *name = rte_vdev_device_name(vdev);
962
963         if (name == NULL)
964                 return -EINVAL;
965
966         RTE_LOG(INFO, PMD,
967                 "Closing Marvell crypto device %s on numa socket %u\n",
968                 name, rte_socket_id());
969
970         sam_deinit();
971
972         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
973         if (cryptodev == NULL)
974                 return -ENODEV;
975
976         return rte_cryptodev_pmd_destroy(cryptodev);
977 }
978
979 /**
980  * Basic driver handlers for use in the constructor.
981  */
982 static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
983         .probe = cryptodev_mrvl_crypto_init,
984         .remove = cryptodev_mrvl_crypto_uninit
985 };
986
987 static struct cryptodev_driver mrvl_crypto_drv;
988
989 /* Register the driver in constructor. */
990 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
991 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
992         "max_nb_queue_pairs=<int> "
993         "max_nb_sessions=<int> "
994         "socket_id=<int>");
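/*
 * Example (illustrative): the PMD can be instantiated from the EAL command
 * line using the name behind CRYPTODEV_NAME_MRVL_PMD, e.g.
 *   --vdev "<pmd-name>,max_nb_queue_pairs=2,max_nb_sessions=2048,socket_id=0"
 */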
995 RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
996                 cryptodev_driver_id);