/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Marvell International Ltd.
 * Copyright(c) 2017 Semihalf.
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_kvargs.h>
#include <rte_mvep_common.h>

#include "rte_mrvl_pmd_private.h"

#define MRVL_PMD_MAX_NB_SESS_ARG                ("max_nb_sessions")
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS        2048

static uint8_t cryptodev_driver_id;

struct mrvl_pmd_init_params {
        struct rte_cryptodev_pmd_init_params common;
        uint32_t max_nb_sessions;
};

const char *mrvl_pmd_valid_params[] = {
        RTE_CRYPTODEV_PMD_NAME_ARG,
        RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
        RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
        MRVL_PMD_MAX_NB_SESS_ARG
};

/**
 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
 *
 * The idea is to have the Not Supported value as the default (0).
 * This way we only need to define proper map sizes;
 * non-initialized entries are not supported by default.
 */
enum algo_supported {
        ALGO_NOT_SUPPORTED = 0,
        ALGO_SUPPORTED = 1,
};

/** Map elements for cipher mapping. */
struct cipher_params_mapping {
        enum algo_supported  supported;   /**< On/Off switch */
        enum sam_cipher_alg  cipher_alg;  /**< Cipher algorithm */
        enum sam_cipher_mode cipher_mode; /**< Cipher mode */
        unsigned int max_key_len;         /**< Maximum key length (in bytes) */
}
/* We want to squeeze in multiple maps into the cache line. */
__rte_aligned(32);

/** Map elements for auth mapping. */
struct auth_params_mapping {
        enum algo_supported supported;  /**< On/off switch */
        enum sam_auth_alg   auth_alg;   /**< Auth algorithm */
}
/* We want to squeeze in multiple maps into the cache line. */
__rte_aligned(32);

/**
 * Map of supported cipher algorithms.
 */
static const
struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
        [RTE_CRYPTO_CIPHER_NULL] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_NONE },
        [RTE_CRYPTO_CIPHER_3DES_CBC] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_3DES,
                .cipher_mode = SAM_CIPHER_CBC,
                .max_key_len = BITS2BYTES(192) },
        [RTE_CRYPTO_CIPHER_3DES_CTR] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_3DES,
                .cipher_mode = SAM_CIPHER_CTR,
                .max_key_len = BITS2BYTES(192) },
        [RTE_CRYPTO_CIPHER_3DES_ECB] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_3DES,
                .cipher_mode = SAM_CIPHER_ECB,
                .max_key_len = BITS2BYTES(192) },
        [RTE_CRYPTO_CIPHER_AES_CBC] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_AES,
                .cipher_mode = SAM_CIPHER_CBC,
                .max_key_len = BITS2BYTES(256) },
        [RTE_CRYPTO_CIPHER_AES_CTR] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_AES,
                .cipher_mode = SAM_CIPHER_CTR,
                .max_key_len = BITS2BYTES(256) },
        [RTE_CRYPTO_CIPHER_AES_ECB] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_AES,
                .cipher_mode = SAM_CIPHER_ECB,
                .max_key_len = BITS2BYTES(256) },
};

/**
 * Map of supported auth algorithms.
 */
static const
struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
        [RTE_CRYPTO_AUTH_NULL] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_NONE },
        [RTE_CRYPTO_AUTH_MD5_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_MD5 },
        [RTE_CRYPTO_AUTH_MD5] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_MD5 },
        [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA1 },
        [RTE_CRYPTO_AUTH_SHA1] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA1 },
        [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
        [RTE_CRYPTO_AUTH_SHA224] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_224 },
        [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
        [RTE_CRYPTO_AUTH_SHA256] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_256 },
        [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
        [RTE_CRYPTO_AUTH_SHA384] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_384 },
        [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
        [RTE_CRYPTO_AUTH_SHA512] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_512 },
        [RTE_CRYPTO_AUTH_AES_GMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_AES_GMAC },
};

/**
 * Map of supported aead algorithms.
 */
static const
struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
        [RTE_CRYPTO_AEAD_AES_GCM] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_AES,
                .cipher_mode = SAM_CIPHER_GCM,
                .max_key_len = BITS2BYTES(256) },
};
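
/*
 * Note: the three maps above are indexed directly by the corresponding DPDK
 * algorithm enum values. The session-parameter helpers below check the
 * .supported flag before using an entry, so any enum value without an
 * explicit initializer stays zero-filled, i.e. ALGO_NOT_SUPPORTED.
 */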

/*
 *-----------------------------------------------------------------------------
 * Forward declarations.
 *-----------------------------------------------------------------------------
 */
static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);

/*
 *-----------------------------------------------------------------------------
 * Session Preparation.
 *-----------------------------------------------------------------------------
 */

/**
 * Get xform chain order.
 *
 * @param xform Pointer to configuration structure chain for crypto operations.
 * @returns Order of crypto operations.
 */
static enum mrvl_crypto_chain_order
mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
        /* Currently, Marvell supports max 2 operations in chain */
        if (xform->next != NULL && xform->next->next != NULL)
                return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;

        if (xform->next != NULL) {
                if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
                        (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
                        return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;

                if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
                        (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
                        return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
        } else {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
                        return MRVL_CRYPTO_CHAIN_AUTH_ONLY;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
                        return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
                        return MRVL_CRYPTO_CHAIN_COMBINED;
        }
        return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
}
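
/*
 * For reference (illustrative only): a cipher xform whose ->next points to an
 * auth xform maps to MRVL_CRYPTO_CHAIN_CIPHER_AUTH, the reverse order maps to
 * MRVL_CRYPTO_CHAIN_AUTH_CIPHER, and a single AEAD xform (e.g. AES-GCM) maps
 * to MRVL_CRYPTO_CHAIN_COMBINED.
 */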

/**
 * Set session parameters for cipher part.
 *
 * @param sess Crypto session pointer.
 * @param cipher_xform Pointer to configuration structure for cipher operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *cipher_xform)
{
        uint8_t *cipher_key;

        /* Make sure we've got proper struct */
        if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
                MRVL_LOG(ERR, "Wrong xform struct provided!");
                return -EINVAL;
        }

        /* See if map data is present and valid */
        if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
                (cipher_map[cipher_xform->cipher.algo].supported
                        != ALGO_SUPPORTED)) {
                MRVL_LOG(ERR, "Cipher algorithm not supported!");
                return -EINVAL;
        }

        sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

        sess->sam_sess_params.dir =
                (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
        sess->sam_sess_params.cipher_alg =
                cipher_map[cipher_xform->cipher.algo].cipher_alg;
        sess->sam_sess_params.cipher_mode =
                cipher_map[cipher_xform->cipher.algo].cipher_mode;

        /* Assume IV will be passed together with data. */
        sess->sam_sess_params.cipher_iv = NULL;

        /* Check against the maximum key length. */
        if (cipher_xform->cipher.key.length >
                cipher_map[cipher_xform->cipher.algo].max_key_len) {
                MRVL_LOG(ERR, "Wrong key length!");
                return -EINVAL;
        }

        cipher_key = malloc(cipher_xform->cipher.key.length);
        if (cipher_key == NULL) {
                MRVL_LOG(ERR, "Insufficient memory!");
                return -ENOMEM;
        }

        memcpy(cipher_key, cipher_xform->cipher.key.data,
                        cipher_xform->cipher.key.length);

        sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
        sess->sam_sess_params.cipher_key = cipher_key;

        return 0;
}

/**
 * Set session parameters for authentication part.
 *
 * @param sess Crypto session pointer.
 * @param auth_xform Pointer to configuration structure for auth operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *auth_xform)
{
        uint8_t *auth_key = NULL;

        /* Make sure we've got proper struct */
        if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
                MRVL_LOG(ERR, "Wrong xform struct provided!");
                return -EINVAL;
        }

        /* See if map data is present and valid */
        if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
                (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
                MRVL_LOG(ERR, "Auth algorithm not supported!");
                return -EINVAL;
        }

        sess->sam_sess_params.dir =
                (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
                SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
        sess->sam_sess_params.auth_alg =
                auth_map[auth_xform->auth.algo].auth_alg;
        sess->sam_sess_params.u.basic.auth_icv_len =
                auth_xform->auth.digest_length;

        if (auth_xform->auth.key.length > 0) {
                auth_key = malloc(auth_xform->auth.key.length);
                if (auth_key == NULL) {
                        MRVL_LOG(ERR, "Not enough memory!");
                        return -ENOMEM;
                }

                memcpy(auth_key, auth_xform->auth.key.data,
                                auth_xform->auth.key.length);
        }

        /* auth_key must be NULL if auth algorithm does not use HMAC */
        sess->sam_sess_params.auth_key = auth_key;
        sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;

        return 0;
}

/**
 * Set session parameters for AEAD part.
 *
 * @param sess Crypto session pointer.
 * @param aead_xform Pointer to configuration structure for AEAD operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *aead_xform)
{
        uint8_t *aead_key;

        /* Make sure we've got proper struct */
        if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
                MRVL_LOG(ERR, "Wrong xform struct provided!");
                return -EINVAL;
        }

        /* See if map data is present and valid */
        if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
                (aead_map[aead_xform->aead.algo].supported
                        != ALGO_SUPPORTED)) {
                MRVL_LOG(ERR, "AEAD algorithm not supported!");
                return -EINVAL;
        }

        sess->sam_sess_params.dir =
                (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
                SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
        sess->sam_sess_params.cipher_alg =
                aead_map[aead_xform->aead.algo].cipher_alg;
        sess->sam_sess_params.cipher_mode =
                aead_map[aead_xform->aead.algo].cipher_mode;

        /* Assume IV will be passed together with data. */
        sess->sam_sess_params.cipher_iv = NULL;

        /* Check against the maximum key length. */
        if (aead_xform->aead.key.length >
                aead_map[aead_xform->aead.algo].max_key_len) {
                MRVL_LOG(ERR, "Wrong key length!");
                return -EINVAL;
        }

        aead_key = malloc(aead_xform->aead.key.length);
        if (aead_key == NULL) {
                MRVL_LOG(ERR, "Insufficient memory!");
                return -ENOMEM;
        }

        memcpy(aead_key, aead_xform->aead.key.data,
                        aead_xform->aead.key.length);

        sess->sam_sess_params.cipher_key = aead_key;
        sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;

        if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
                sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;

        sess->sam_sess_params.u.basic.auth_icv_len =
                aead_xform->aead.digest_length;

        sess->sam_sess_params.u.basic.auth_aad_len =
                aead_xform->aead.aad_length;

        return 0;
}

/**
 * Parse crypto transform chain and set up session parameters.
 *
 * @param sess Pointer to crypto session.
 * @param xform Pointer to configuration structure chain for crypto operations.
 * @returns 0 in case of success, negative value otherwise.
 */
int
mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *cipher_xform = NULL;
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *aead_xform = NULL;

        /* Filter out spurious/broken requests */
        if (xform == NULL)
                return -EINVAL;

        sess->chain_order = mrvl_crypto_get_chain_order(xform);
        switch (sess->chain_order) {
        case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
                cipher_xform = xform;
                auth_xform = xform->next;
                break;
        case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
                cipher_xform = xform;
                break;
        case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
                auth_xform = xform;
                break;
        case MRVL_CRYPTO_CHAIN_COMBINED:
                aead_xform = xform;
                break;
        default:
                return -EINVAL;
        }

        if ((cipher_xform != NULL) &&
                (mrvl_crypto_set_cipher_session_parameters(
                        sess, cipher_xform) < 0)) {
                MRVL_LOG(ERR, "Invalid/unsupported cipher parameters!");
                return -EINVAL;
        }

        if ((auth_xform != NULL) &&
                (mrvl_crypto_set_auth_session_parameters(
                        sess, auth_xform) < 0)) {
                MRVL_LOG(ERR, "Invalid/unsupported auth parameters!");
                return -EINVAL;
        }

        if ((aead_xform != NULL) &&
                (mrvl_crypto_set_aead_session_parameters(
                        sess, aead_xform) < 0)) {
                MRVL_LOG(ERR, "Invalid/unsupported aead parameters!");
                return -EINVAL;
        }

        return 0;
}
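
/*
 * Illustrative sketch (not part of the driver): a caller-side chain that the
 * parser above accepts could look roughly like this, using field names from
 * struct rte_crypto_sym_xform:
 *
 *   struct rte_crypto_sym_xform auth = {
 *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *           .auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *                     .op = RTE_CRYPTO_AUTH_OP_GENERATE },
 *           .next = NULL,
 *   };
 *   struct rte_crypto_sym_xform cipher = {
 *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *           .cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *                       .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT },
 *           .next = &auth,
 *   };
 *
 * mrvl_crypto_set_session_parameters(sess, &cipher) then resolves to
 * MRVL_CRYPTO_CHAIN_CIPHER_AUTH (key and IV fields omitted for brevity).
 */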

/*
 *-----------------------------------------------------------------------------
 * Process Operations
 *-----------------------------------------------------------------------------
 */

/**
 * Prepare a single request.
 *
 * This function basically translates DPDK crypto request into one
 * understandable by MUSDK's SAM. If this is the first request in a session,
 * it starts the session.
 *
 * @param request Pointer to pre-allocated and reset request buffer [Out].
 * @param src_bd Pointer to pre-allocated source descriptor [Out].
 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
 * @param op Pointer to DPDK crypto operation struct [In].
 */
static inline int
mrvl_request_prepare(struct sam_cio_op_params *request,
                struct sam_buf_info *src_bd,
                struct sam_buf_info *dst_bd,
                struct rte_crypto_op *op)
{
        struct mrvl_crypto_session *sess;
        struct rte_mbuf *src_mbuf, *dst_mbuf;
        uint16_t segments_nb;
        uint8_t *digest;
        int i;

        if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                MRVL_LOG(ERR, "MRVL CRYPTO PMD only supports session "
                                "oriented requests, op (%p) is sessionless!",
                                op);
                return -EINVAL;
        }

        sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
                        op->sym->session, cryptodev_driver_id);
        if (unlikely(sess == NULL)) {
                MRVL_LOG(ERR, "Session was not created for this device!");
                return -EINVAL;
        }

        request->sa = sess->sam_sess;
        request->cookie = op;

        src_mbuf = op->sym->m_src;
        segments_nb = src_mbuf->nb_segs;
        /* The following conditions must be met:
         * - A destination buffer is required when the source buffer is segmented
         * - A segmented destination buffer is not supported
         */
        if ((segments_nb > 1) && (!op->sym->m_dst)) {
                MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
                return -1;
        }
        /* For non SG case:
         * If application delivered us null dst buffer, it means it expects
         * us to deliver the result in src buffer.
         */
        dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

        if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
                MRVL_LOG(ERR, "Segmented destination buffer not supported!");
                return -1;
        }

        request->num_bufs = segments_nb;
        for (i = 0; i < segments_nb; i++) {
                /* Empty source. */
                if (rte_pktmbuf_data_len(src_mbuf) == 0) {
                        /* EIP does not support 0 length buffers. */
                        MRVL_LOG(ERR, "Buffer length == 0 not supported!");
                        return -1;
                }
                src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
                src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
                src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);

                src_mbuf = src_mbuf->next;
        }
        request->src = src_bd;

        /* Empty destination. */
        if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
                /* Make dst buffer fit at least source data. */
                if (rte_pktmbuf_append(dst_mbuf,
                        rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
                        MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
                        return -1;
                }
        }

        request->dst = dst_bd;
        dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
        dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);

        /*
         * We can use all available space in dst_mbuf,
         * not only what's used currently.
         */
        dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);

        if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
                request->cipher_len = op->sym->aead.data.length;
                request->cipher_offset = op->sym->aead.data.offset;
                request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
                        sess->cipher_iv_offset);

                request->auth_aad = op->sym->aead.aad.data;
                request->auth_offset = request->cipher_offset;
                request->auth_len = request->cipher_len;
        } else {
                request->cipher_len = op->sym->cipher.data.length;
                request->cipher_offset = op->sym->cipher.data.offset;
                request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
                                sess->cipher_iv_offset);

                request->auth_offset = op->sym->auth.data.offset;
                request->auth_len = op->sym->auth.data.length;
        }

        digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
                op->sym->aead.digest.data : op->sym->auth.digest.data;
        if (digest == NULL) {
                /* No auth - no worry. */
                return 0;
        }

        request->auth_icv_offset = request->auth_offset + request->auth_len;

        /*
         * EIP supports only scenarios where ICV (digest buffer) is placed at
         * auth_icv_offset.
         */
        if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
                /*
                 * This should be the most common case anyway,
                 * EIP will overwrite DST buffer at auth_icv_offset.
                 */
                if (rte_pktmbuf_mtod_offset(
                                dst_mbuf, uint8_t *,
                                request->auth_icv_offset) == digest)
                        return 0;
        } else { /* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
                /*
                 * EIP will look for the digest at auth_icv_offset
                 * in the SRC buffer. It must be placed in the last
                 * segment and the offset must be set to reach the digest
                 * in the last segment.
                 */
                struct rte_mbuf *last_seg = op->sym->m_src;
                uint32_t d_offset = request->auth_icv_offset;
                uint32_t d_size = sess->sam_sess_params.u.basic.auth_icv_len;
                unsigned char *d_ptr;

                /* Find the last segment and the offset for the last segment */
                while ((last_seg->next != NULL) &&
                                (d_offset >= last_seg->data_len)) {
                        d_offset -= last_seg->data_len;
                        last_seg = last_seg->next;
                }

                if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
                                            d_offset) == digest)
                        return 0;

                /* copy digest to last segment */
                if (last_seg->buf_len >= (d_size + d_offset)) {
                        d_ptr = (unsigned char *)last_seg->buf_addr +
                                 d_offset;
                        rte_memcpy(d_ptr, digest, d_size);
                        return 0;
                }
        }

        /*
         * If we landed here it means that the digest pointer is
         * at a different place than expected.
         */
        return -1;
}

/*
 *-----------------------------------------------------------------------------
 * PMD Framework handlers
 *-----------------------------------------------------------------------------
 */

/**
 * Enqueue burst.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements consumed from ops.
 */
static uint16_t
mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        uint16_t iter_ops = 0;
        uint16_t to_enq = 0;
        uint16_t consumed = 0;
        int ret;
        struct sam_cio_op_params requests[nb_ops];
        /*
         * SAM does not store bd pointers, so on-stack scope will be enough.
         */
        struct mrvl_crypto_src_table src_bd[nb_ops];
        struct sam_buf_info          dst_bd[nb_ops];
        struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;

        if (nb_ops == 0)
                return 0;

        /* Prepare the burst. */
        memset(&requests, 0, sizeof(requests));
        memset(&src_bd, 0, sizeof(src_bd));

        /* Iterate through */
        for (; iter_ops < nb_ops; ++iter_ops) {
                /* store the op id for debug */
                src_bd[iter_ops].iter_ops = iter_ops;
                if (mrvl_request_prepare(&requests[iter_ops],
                                        src_bd[iter_ops].src_bd,
                                        &dst_bd[iter_ops],
                                        ops[iter_ops]) < 0) {
                        MRVL_LOG(ERR, "Error while preparing parameters!");
                        qp->stats.enqueue_err_count++;
                        ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        /*
                         * Number of handled ops is increased
                         * (even if handling this one failed).
                         */
                        ++consumed;
                        break;
                }

                ops[iter_ops]->status =
                        RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

                /* Increase the number of ops to enqueue. */
                ++to_enq;
        } /* for (; iter_ops < nb_ops;... */

        if (to_enq > 0) {
                /* Send the burst */
                ret = sam_cio_enq(qp->cio, requests, &to_enq);
                consumed += to_enq;
                if (ret < 0) {
                        /*
                         * Trust SAM that in this case returned value will be at
                         * some point correct (now it is returned unmodified).
                         */
                        qp->stats.enqueue_err_count += to_enq;
                        for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
                                ops[iter_ops]->status =
                                        RTE_CRYPTO_OP_STATUS_ERROR;
                }
        }

        qp->stats.enqueued_count += to_enq;
        return consumed;
}

/**
 * Dequeue burst.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements dequeued.
 */
static uint16_t
mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
                struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        int ret;
        struct mrvl_crypto_qp *qp = queue_pair;
        struct sam_cio *cio = qp->cio;
        struct sam_cio_op_result results[nb_ops];
        uint16_t i;

        ret = sam_cio_deq(cio, results, &nb_ops);
        if (ret < 0) {
                /* Count all dequeued as error. */
                qp->stats.dequeue_err_count += nb_ops;

                /* But act as if they were dequeued anyway. */
                qp->stats.dequeued_count += nb_ops;

                return 0;
        }

        /* Unpack and check results. */
        for (i = 0; i < nb_ops; ++i) {
                ops[i] = results[i].cookie;

                switch (results[i].status) {
                case SAM_CIO_OK:
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        break;
                case SAM_CIO_ERR_ICV:
                        MRVL_LOG(DEBUG, "CIO returned SAM_CIO_ERR_ICV.");
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        break;
                default:
                        MRVL_LOG(DEBUG,
                                "CIO returned Error: %d.", results[i].status);
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
                        break;
                }
        }

        qp->stats.dequeued_count += nb_ops;
        return nb_ops;
}
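
/*
 * Note on the data path above: the enqueue side stores each rte_crypto_op
 * pointer in the request's cookie field, and the dequeue side recovers it
 * from the corresponding sam_cio_op_result, so no extra per-op bookkeeping
 * is kept in the queue pair.
 */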

/**
 * Create a new crypto device.
 *
 * @param name Driver name.
 * @param vdev Pointer to device structure.
 * @param init_params Pointer to initialization parameters.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_create(const char *name,
                struct rte_vdev_device *vdev,
                struct mrvl_pmd_init_params *init_params)
{
        struct rte_cryptodev *dev;
        struct mrvl_crypto_private *internals;
        struct sam_init_params  sam_params;
        int ret = -EINVAL;

        dev = rte_cryptodev_pmd_create(name, &vdev->device,
                        &init_params->common);
        if (dev == NULL) {
                MRVL_LOG(ERR, "Failed to create cryptodev vdev!");
                goto init_error;
        }

        dev->driver_id = cryptodev_driver_id;
        dev->dev_ops = rte_mrvl_crypto_pmd_ops;

        /* Register rx/tx burst functions for data path. */
        dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
        dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;

        dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
                        RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

        internals = dev->data->dev_private;

        internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
        internals->max_nb_sessions = init_params->max_nb_sessions;

        ret = rte_mvep_init(MVEP_MOD_T_SAM, NULL);
        if (ret)
                goto init_error;

        sam_params.max_num_sessions = internals->max_nb_sessions;

        /* sam_set_debug_flags(3); */

        ret = sam_init(&sam_params);
        if (ret)
                goto init_error;

        return 0;

init_error:
        MRVL_LOG(ERR,
                "Driver %s: %s failed!", init_params->common.name, __func__);

        cryptodev_mrvl_crypto_uninit(vdev);
        return ret;
}

/** Parse integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        int *i = (int *) extra_args;

        *i = atoi(value);
        if (*i < 0) {
                MRVL_LOG(ERR, "Argument has to be non-negative!");
                return -EINVAL;
        }

        return 0;
}

/** Parse name */
static int
parse_name_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        struct rte_cryptodev_pmd_init_params *params = extra_args;

        if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
                MRVL_LOG(ERR, "Invalid name %s, should be less than %u bytes!",
                         value, RTE_CRYPTODEV_NAME_MAX_LEN - 1);
                return -EINVAL;
        }

        strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);

        return 0;
}

static int
mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
                         const char *input_args)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;

        if (params == NULL)
                return -EINVAL;

        if (input_args) {
                kvlist = rte_kvargs_parse(input_args,
                                          mrvl_pmd_valid_params);
                if (kvlist == NULL)
                        return -1;

                /* Common VDEV parameters */
                ret = rte_kvargs_process(kvlist,
                                         RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
                                         &parse_integer_arg,
                                         &params->common.max_nb_queue_pairs);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                         RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
                                         &parse_integer_arg,
                                         &params->common.socket_id);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                         RTE_CRYPTODEV_PMD_NAME_ARG,
                                         &parse_name_arg,
                                         &params->common);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                         MRVL_PMD_MAX_NB_SESS_ARG,
                                         &parse_integer_arg,
                                         &params->max_nb_sessions);
                if (ret < 0)
                        goto free_kvlist;

        }

free_kvlist:
        rte_kvargs_free(kvlist);
        return ret;
}

/**
 * Initialize the crypto device.
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
{
        struct mrvl_pmd_init_params init_params = {
                .common = {
                        .name = "",
                        .private_data_size =
                                sizeof(struct mrvl_crypto_private),
                        .max_nb_queue_pairs =
                                sam_get_num_inst() * sam_get_num_cios(0),
                        .socket_id = rte_socket_id()
                },
                .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
        };

        const char *name, *args;
        int ret;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -EINVAL;
        args = rte_vdev_device_args(vdev);

        ret = mrvl_pmd_parse_input_args(&init_params, args);
        if (ret) {
                MRVL_LOG(ERR, "Failed to parse initialisation arguments[%s]!",
                         args);
                return -EINVAL;
        }

        return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
}

/**
 * Uninitialize the crypto device.
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
{
        struct rte_cryptodev *cryptodev;
        const char *name = rte_vdev_device_name(vdev);

        if (name == NULL)
                return -EINVAL;

        MRVL_LOG(INFO, "Closing Marvell crypto device %s on numa socket %u.",
                 name, rte_socket_id());

        sam_deinit();
        rte_mvep_deinit(MVEP_MOD_T_SAM);

        cryptodev = rte_cryptodev_pmd_get_named_dev(name);
        if (cryptodev == NULL)
                return -ENODEV;

        return rte_cryptodev_pmd_destroy(cryptodev);
}

/**
 * Basic driver handlers for use in the constructor.
 */
static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
        .probe = cryptodev_mrvl_crypto_init,
        .remove = cryptodev_mrvl_crypto_uninit
};

static struct cryptodev_driver mrvl_crypto_drv;

/* Register the driver in constructor. */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
        "max_nb_queue_pairs=<int> "
        "max_nb_sessions=<int> "
        "socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
                cryptodev_driver_id);

RTE_INIT(crypto_mrvl_init_log)
{
        mrvl_logtype_driver = rte_log_register("pmd.crypto.mvsam");
}