1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
#include <errno.h>
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_kvargs.h>
16 #include "rte_mrvl_pmd_private.h"
18 #define MRVL_MUSDK_DMA_MEMSIZE 41943040
20 #define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
21 #define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
23 static uint8_t cryptodev_driver_id;
/* Vdev init parameters: the common cryptodev params plus the PMD-specific
 * session-count limit (filled from the "max_nb_sessions" devargs key).
 * NOTE(review): this extract elides the closing braces of the definitions
 * below — line numbering gaps show lines are missing from view.
 */
25 struct mrvl_pmd_init_params {
26 struct rte_cryptodev_pmd_init_params common;
27 uint32_t max_nb_sessions;
/* Devargs keys accepted by this PMD (fed to rte_kvargs_parse()). */
30 const char *mrvl_pmd_valid_params[] = {
31 RTE_CRYPTODEV_PMD_NAME_ARG,
32 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
33 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
34 MRVL_PMD_MAX_NB_SESS_ARG
38 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
40 * The idea is to have Not Supported value as default (0).
41 * This way we need only to define proper map sizes,
42 * non-initialized entries will be by default not supported.
/* Enum fragment: ALGO_SUPPORTED counterpart is elided in this extract. */
45 ALGO_NOT_SUPPORTED = 0,
49 /** Map elements for cipher mapping.*/
50 struct cipher_params_mapping {
51 enum algo_supported supported; /**< On/Off switch */
52 enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
53 enum sam_cipher_mode cipher_mode; /**< Cipher mode */
54 unsigned int max_key_len; /**< Maximum key length (in bytes)*/
56 /* We want to squeeze in multiple maps into the cache line. */
59 /** Map elements for auth mapping.*/
60 struct auth_params_mapping {
61 enum algo_supported supported; /**< On/off switch */
62 enum sam_auth_alg auth_alg; /**< Auth algorithm */
64 /* We want to squeeze in multiple maps into the cache line. */
/* Capability table: DPDK cipher algorithm id -> MUSDK SAM cipher settings.
 * Unlisted indices are zero-initialized, i.e. ALGO_NOT_SUPPORTED.
 * NOTE(review): closing "};" lines of these tables are elided in this extract.
 */
68 * Map of supported cipher algorithms.
71 struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
72 [RTE_CRYPTO_CIPHER_NULL] = {
73 .supported = ALGO_SUPPORTED,
74 .cipher_alg = SAM_CIPHER_NONE },
75 [RTE_CRYPTO_CIPHER_3DES_CBC] = {
76 .supported = ALGO_SUPPORTED,
77 .cipher_alg = SAM_CIPHER_3DES,
78 .cipher_mode = SAM_CIPHER_CBC,
79 .max_key_len = BITS2BYTES(192) },
80 [RTE_CRYPTO_CIPHER_3DES_CTR] = {
81 .supported = ALGO_SUPPORTED,
82 .cipher_alg = SAM_CIPHER_3DES,
83 .cipher_mode = SAM_CIPHER_CTR,
84 .max_key_len = BITS2BYTES(192) },
85 [RTE_CRYPTO_CIPHER_3DES_ECB] = {
86 .supported = ALGO_SUPPORTED,
87 .cipher_alg = SAM_CIPHER_3DES,
88 .cipher_mode = SAM_CIPHER_ECB,
89 .max_key_len = BITS2BYTES(192) },
90 [RTE_CRYPTO_CIPHER_AES_CBC] = {
91 .supported = ALGO_SUPPORTED,
92 .cipher_alg = SAM_CIPHER_AES,
93 .cipher_mode = SAM_CIPHER_CBC,
94 .max_key_len = BITS2BYTES(256) },
95 [RTE_CRYPTO_CIPHER_AES_CTR] = {
96 .supported = ALGO_SUPPORTED,
97 .cipher_alg = SAM_CIPHER_AES,
98 .cipher_mode = SAM_CIPHER_CTR,
99 .max_key_len = BITS2BYTES(256) },
100 [RTE_CRYPTO_CIPHER_AES_ECB] = {
101 .supported = ALGO_SUPPORTED,
102 .cipher_alg = SAM_CIPHER_AES,
103 .cipher_mode = SAM_CIPHER_ECB,
104 .max_key_len = BITS2BYTES(256) },
/* Capability table: DPDK auth algorithm id -> MUSDK SAM auth algorithm. */
108 * Map of supported auth algorithms.
111 struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
112 [RTE_CRYPTO_AUTH_NULL] = {
113 .supported = ALGO_SUPPORTED,
114 .auth_alg = SAM_AUTH_NONE },
115 [RTE_CRYPTO_AUTH_MD5_HMAC] = {
116 .supported = ALGO_SUPPORTED,
117 .auth_alg = SAM_AUTH_HMAC_MD5 },
118 [RTE_CRYPTO_AUTH_MD5] = {
119 .supported = ALGO_SUPPORTED,
120 .auth_alg = SAM_AUTH_HASH_MD5 },
121 [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
122 .supported = ALGO_SUPPORTED,
123 .auth_alg = SAM_AUTH_HMAC_SHA1 },
124 [RTE_CRYPTO_AUTH_SHA1] = {
125 .supported = ALGO_SUPPORTED,
126 .auth_alg = SAM_AUTH_HASH_SHA1 },
127 [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
128 .supported = ALGO_SUPPORTED,
129 .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
130 [RTE_CRYPTO_AUTH_SHA224] = {
131 .supported = ALGO_SUPPORTED,
132 .auth_alg = SAM_AUTH_HASH_SHA2_224 },
133 [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
134 .supported = ALGO_SUPPORTED,
135 .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
136 [RTE_CRYPTO_AUTH_SHA256] = {
137 .supported = ALGO_SUPPORTED,
138 .auth_alg = SAM_AUTH_HASH_SHA2_256 },
139 [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
140 .supported = ALGO_SUPPORTED,
141 .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
142 [RTE_CRYPTO_AUTH_SHA384] = {
143 .supported = ALGO_SUPPORTED,
144 .auth_alg = SAM_AUTH_HASH_SHA2_384 },
145 [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
146 .supported = ALGO_SUPPORTED,
147 .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
148 [RTE_CRYPTO_AUTH_SHA512] = {
149 .supported = ALGO_SUPPORTED,
150 .auth_alg = SAM_AUTH_HASH_SHA2_512 },
151 [RTE_CRYPTO_AUTH_AES_GMAC] = {
152 .supported = ALGO_SUPPORTED,
153 .auth_alg = SAM_AUTH_AES_GMAC },
/* Capability table: DPDK AEAD algorithm id -> SAM cipher settings (GCM). */
157 * Map of supported aead algorithms.
160 struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
161 [RTE_CRYPTO_AEAD_AES_GCM] = {
162 .supported = ALGO_SUPPORTED,
163 .cipher_alg = SAM_CIPHER_AES,
164 .cipher_mode = SAM_CIPHER_GCM,
165 .max_key_len = BITS2BYTES(256) },
169 *-----------------------------------------------------------------------------
170 * Forward declarations.
171 *-----------------------------------------------------------------------------
173 static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
176 *-----------------------------------------------------------------------------
177 * Session Preparation.
178 *-----------------------------------------------------------------------------
/* Classify an xform chain (at most two linked xforms) into one of the
 * MRVL_CRYPTO_CHAIN_* orders; unrecognized combinations (e.g. cipher+cipher,
 * or chains longer than two) fall through to CHAIN_NOT_SUPPORTED.
 * NOTE(review): the function's braces are elided in this extract.
 */
182 * Get xform chain order.
184 * @param xform Pointer to configuration structure chain for crypto operations.
185 * @returns Order of crypto operations.
187 static enum mrvl_crypto_chain_order
188 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
190 /* Currently, Marvell supports max 2 operations in chain */
191 if (xform->next != NULL && xform->next->next != NULL)
192 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
194 if (xform->next != NULL) {
195 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
196 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
197 return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
199 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
200 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
201 return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
/* Single-xform cases below apply when xform->next == NULL. */
203 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
204 return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
206 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
207 return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
209 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
210 return MRVL_CRYPTO_CHAIN_COMBINED;
212 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
216 * Set session parameters for cipher part.
218 * @param sess Crypto session pointer.
219 * @param cipher_xform Pointer to configuration structure for cipher operations.
220 * @returns 0 in case of success, negative value otherwise.
223 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
224 const struct rte_crypto_sym_xform *cipher_xform)
226 /* Make sure we've got proper struct */
227 if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
228 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
232 /* See if map data is present and valid */
233 if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
234 (cipher_map[cipher_xform->cipher.algo].supported
235 != ALGO_SUPPORTED)) {
236 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
240 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
242 sess->sam_sess_params.dir =
243 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
244 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
245 sess->sam_sess_params.cipher_alg =
246 cipher_map[cipher_xform->cipher.algo].cipher_alg;
247 sess->sam_sess_params.cipher_mode =
248 cipher_map[cipher_xform->cipher.algo].cipher_mode;
250 /* Assume IV will be passed together with data. */
251 sess->sam_sess_params.cipher_iv = NULL;
253 /* Get max key length. */
254 if (cipher_xform->cipher.key.length >
255 cipher_map[cipher_xform->cipher.algo].max_key_len) {
256 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
260 sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
261 sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
267 * Set session parameters for authentication part.
269 * @param sess Crypto session pointer.
270 * @param auth_xform Pointer to configuration structure for auth operations.
271 * @returns 0 in case of success, negative value otherwise.
274 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
275 const struct rte_crypto_sym_xform *auth_xform)
277 /* Make sure we've got proper struct */
278 if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
279 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
283 /* See if map data is present and valid */
284 if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
285 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
286 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
290 sess->sam_sess_params.dir =
291 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
292 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
293 sess->sam_sess_params.auth_alg =
294 auth_map[auth_xform->auth.algo].auth_alg;
295 sess->sam_sess_params.u.basic.auth_icv_len =
296 auth_xform->auth.digest_length;
297 /* auth_key must be NULL if auth algorithm does not use HMAC */
298 sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
299 auth_xform->auth.key.data : NULL;
300 sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
306 * Set session parameters for aead part.
308 * @param sess Crypto session pointer.
309 * @param aead_xform Pointer to configuration structure for aead operations.
310 * @returns 0 in case of success, negative value otherwise.
313 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
314 const struct rte_crypto_sym_xform *aead_xform)
316 /* Make sure we've got proper struct */
317 if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
318 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
322 /* See if map data is present and valid */
323 if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
324 (aead_map[aead_xform->aead.algo].supported
325 != ALGO_SUPPORTED)) {
326 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
330 sess->sam_sess_params.dir =
331 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
332 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
333 sess->sam_sess_params.cipher_alg =
334 aead_map[aead_xform->aead.algo].cipher_alg;
335 sess->sam_sess_params.cipher_mode =
336 aead_map[aead_xform->aead.algo].cipher_mode;
338 /* Assume IV will be passed together with data. */
339 sess->sam_sess_params.cipher_iv = NULL;
341 /* Get max key length. */
342 if (aead_xform->aead.key.length >
343 aead_map[aead_xform->aead.algo].max_key_len) {
344 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
348 sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
349 sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
351 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
352 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
354 sess->sam_sess_params.u.basic.auth_icv_len =
355 aead_xform->aead.digest_length;
357 sess->sam_sess_params.u.basic.auth_aad_len =
358 aead_xform->aead.aad_length;
/* Dispatch on the detected chain order and delegate to the per-part
 * parameter setters above; any setter failure aborts session setup.
 * NOTE(review): this extract elides the function's braces, the initial
 * xform NULL-check body, the switch 'break'/default lines and the
 * error/success 'return' statements.
 */
364 * Parse crypto transform chain and setup session parameters.
366 * @param dev Pointer to crypto device
367 * @param sess Poiner to crypto session
368 * @param xform Pointer to configuration structure chain for crypto operations.
369 * @returns 0 in case of success, negative value otherwise.
372 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
373 const struct rte_crypto_sym_xform *xform)
375 const struct rte_crypto_sym_xform *cipher_xform = NULL;
376 const struct rte_crypto_sym_xform *auth_xform = NULL;
377 const struct rte_crypto_sym_xform *aead_xform = NULL;
379 /* Filter out spurious/broken requests */
383 sess->chain_order = mrvl_crypto_get_chain_order(xform);
384 switch (sess->chain_order) {
385 case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
386 cipher_xform = xform;
387 auth_xform = xform->next;
389 case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
391 cipher_xform = xform->next;
393 case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
394 cipher_xform = xform;
396 case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
399 case MRVL_CRYPTO_CHAIN_COMBINED:
/* Each non-NULL xform pointer triggers the matching setter below. */
406 if ((cipher_xform != NULL) &&
407 (mrvl_crypto_set_cipher_session_parameters(
408 sess, cipher_xform) < 0)) {
409 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
413 if ((auth_xform != NULL) &&
414 (mrvl_crypto_set_auth_session_parameters(
415 sess, auth_xform) < 0)) {
416 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
420 if ((aead_xform != NULL) &&
421 (mrvl_crypto_set_aead_session_parameters(
422 sess, aead_xform) < 0)) {
423 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
431 *-----------------------------------------------------------------------------
433 *-----------------------------------------------------------------------------
/* Translate one DPDK crypto op into a SAM request: fills src/dst buffer
 * descriptors from the mbuf chain and copies cipher/auth offsets + lengths
 * from the session/op. Segmented source is allowed (one src_bd entry per
 * segment); destination must be a single contiguous mbuf.
 * NOTE(review): this extract elides the function's braces, several error
 * 'return' statements and some declarations (e.g. 'i', 'digest').
 */
437 * Prepare a single request.
439 * This function basically translates DPDK crypto request into one
440 * understandable by MUDSK's SAM. If this is a first request in a session,
441 * it starts the session.
443 * @param request Pointer to pre-allocated && reset request buffer [Out].
444 * @param src_bd Pointer to pre-allocated source descriptor [Out].
445 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
446 * @param op Pointer to DPDK crypto operation struct [In].
449 mrvl_request_prepare(struct sam_cio_op_params *request,
450 struct sam_buf_info *src_bd,
451 struct sam_buf_info *dst_bd,
452 struct rte_crypto_op *op)
454 struct mrvl_crypto_session *sess;
455 struct rte_mbuf *src_mbuf, *dst_mbuf;
456 uint16_t segments_nb;
/* Sessionless ops are rejected: a pre-created session is mandatory. */
460 if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
461 MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
462 "oriented requests, op (%p) is sessionless.",
467 sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
468 op->sym->session, cryptodev_driver_id);
469 if (unlikely(sess == NULL)) {
470 MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
474 request->sa = sess->sam_sess;
475 request->cookie = op;
477 src_mbuf = op->sym->m_src;
478 segments_nb = src_mbuf->nb_segs;
479 /* The following conditions must be met:
480 * - Destination buffer is required when segmented source buffer
481 * - Segmented destination buffer is not supported
483 if ((segments_nb > 1) && (!op->sym->m_dst)) {
484 MRVL_CRYPTO_LOG_ERR("op->sym->m_dst = NULL!\n");
488 * If application delivered us null dst buffer, it means it expects
489 * us to deliver the result in src buffer.
491 dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
493 if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
494 MRVL_CRYPTO_LOG_ERR("Segmented destination buffer "
/* One SAM buffer descriptor per source mbuf segment. */
499 request->num_bufs = segments_nb;
500 for (i = 0; i < segments_nb; i++) {
502 if (rte_pktmbuf_data_len(src_mbuf) == 0) {
503 /* EIP does not support 0 length buffers. */
504 MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
507 src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
508 src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
509 src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);
511 src_mbuf = src_mbuf->next;
513 request->src = src_bd;
515 /* Empty destination. */
516 if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
517 /* Make dst buffer fit at least source data. */
518 if (rte_pktmbuf_append(dst_mbuf,
519 rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
520 MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
525 request->dst = dst_bd;
526 dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
527 dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
530 * We can use all available space in dst_mbuf,
531 * not only what's used currently.
533 dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
/* AEAD (COMBINED) reuses the cipher offsets for auth; otherwise the
 * cipher and auth regions come from their separate op fields.
 */
535 if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
536 request->cipher_len = op->sym->aead.data.length;
537 request->cipher_offset = op->sym->aead.data.offset;
538 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
539 sess->cipher_iv_offset);
541 request->auth_aad = op->sym->aead.aad.data;
542 request->auth_offset = request->cipher_offset;
543 request->auth_len = request->cipher_len;
545 request->cipher_len = op->sym->cipher.data.length;
546 request->cipher_offset = op->sym->cipher.data.offset;
547 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
548 sess->cipher_iv_offset);
550 request->auth_offset = op->sym->auth.data.offset;
551 request->auth_len = op->sym->auth.data.length;
554 digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
555 op->sym->aead.digest.data : op->sym->auth.digest.data;
556 if (digest == NULL) {
557 /* No auth - no worry. */
/* ICV (digest) is expected immediately after the authenticated data. */
561 request->auth_icv_offset = request->auth_offset + request->auth_len;
564 * EIP supports only scenarios where ICV(digest buffer) is placed at
567 if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
569 * This should be the most common case anyway,
570 * EIP will overwrite DST buffer at auth_icv_offset.
572 if (rte_pktmbuf_mtod_offset(
574 request->auth_icv_offset) == digest)
576 } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
578 * EIP will look for digest at auth_icv_offset
579 * offset in SRC buffer. It must be placed in the last
580 * segment and the offset must be set to reach digest
581 * in the last segment
583 struct rte_mbuf *last_seg = op->sym->m_src;
584 uint32_t d_offset = request->auth_icv_offset;
585 u32 d_size = sess->sam_sess_params.u.basic.auth_icv_len;
586 unsigned char *d_ptr;
588 /* Find the last segment and the offset for the last segment */
589 while ((last_seg->next != NULL) &&
590 (d_offset >= last_seg->data_len)) {
591 d_offset -= last_seg->data_len;
592 last_seg = last_seg->next;
595 if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
599 /* copy digest to last segment */
600 if (last_seg->buf_len >= (d_size + d_offset)) {
601 d_ptr = (unsigned char *)last_seg->buf_addr +
603 rte_memcpy(d_ptr, digest, d_size);
609 * If we landed here it means that digest pointer is
610 * at different than expected place.
616 *-----------------------------------------------------------------------------
617 * PMD Framework handlers
618 *-----------------------------------------------------------------------------
/* Burst enqueue handler: prepares up to nb_ops SAM requests on the stack
 * (VLAs sized by nb_ops) and submits them with a single sam_cio_enq() call.
 * Ops that fail preparation are marked ERROR and counted but not submitted.
 * NOTE(review): extract elides the signature's nb_ops parameter line, the
 * 'to_enq'/'ret' declarations, several braces and the final return.
 */
624 * @param queue_pair Pointer to queue pair.
625 * @param ops Pointer to ops requests array.
626 * @param nb_ops Number of elements in ops requests array.
627 * @returns Number of elements consumed from ops.
630 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
633 uint16_t iter_ops = 0;
635 uint16_t consumed = 0;
637 struct sam_cio_op_params requests[nb_ops];
639 * SAM does not store bd pointers, so on-stack scope will be enough.
641 struct mrvl_crypto_src_table src_bd[nb_ops];
642 struct sam_buf_info dst_bd[nb_ops];
643 struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
648 /* Prepare the burst. */
649 memset(&requests, 0, sizeof(requests));
650 memset(&src_bd, 0, sizeof(src_bd));
652 /* Iterate through */
653 for (; iter_ops < nb_ops; ++iter_ops) {
654 /* store the op id for debug */
655 src_bd[iter_ops].iter_ops = iter_ops;
656 if (mrvl_request_prepare(&requests[iter_ops],
657 src_bd[iter_ops].src_bd,
659 ops[iter_ops]) < 0) {
661 "Error while parameters preparation!");
662 qp->stats.enqueue_err_count++;
663 ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
666 * Number of handled ops is increased
667 * (even if the result of handling is error).
673 ops[iter_ops]->status =
674 RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
676 /* Increase the number of ops to enqueue. */
678 } /* for (; iter_ops < nb_ops;... */
682 ret = sam_cio_enq(qp->cio, requests, &to_enq);
/* On sam_cio_enq() failure, every op submitted in this call is failed. */
686 * Trust SAM that in this case returned value will be at
687 * some point correct (now it is returned unmodified).
689 qp->stats.enqueue_err_count += to_enq;
690 for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
691 ops[iter_ops]->status =
692 RTE_CRYPTO_OP_STATUS_ERROR;
696 qp->stats.enqueued_count += to_enq;
/* Burst dequeue handler: drains completed SAM results and maps each
 * SAM status to a DPDK op status (OK -> SUCCESS, ICV error -> AUTH_FAILED,
 * anything else -> ERROR). The op pointer is recovered from the cookie
 * stored at enqueue time.
 * NOTE(review): extract elides the nb_ops parameter line, 'i'/'ret'
 * declarations, switch 'case SAM_CIO_OK:'/'default:'/'break' lines,
 * braces and the final return.
 */
703 * @param queue_pair Pointer to queue pair.
704 * @param ops Pointer to ops requests array.
705 * @param nb_ops Number of elements in ops requests array.
706 * @returns Number of elements dequeued.
709 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
710 struct rte_crypto_op **ops,
714 struct mrvl_crypto_qp *qp = queue_pair;
715 struct sam_cio *cio = qp->cio;
716 struct sam_cio_op_result results[nb_ops];
719 ret = sam_cio_deq(cio, results, &nb_ops);
721 /* Count all dequeued as error. */
722 qp->stats.dequeue_err_count += nb_ops;
724 /* But act as they were dequeued anyway*/
725 qp->stats.dequeued_count += nb_ops;
730 /* Unpack and check results. */
731 for (i = 0; i < nb_ops; ++i) {
732 ops[i] = results[i].cookie;
734 switch (results[i].status) {
736 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
738 case SAM_CIO_ERR_ICV:
739 MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
740 ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
744 "CIO returned Error: %d", results[i].status);
745 ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
750 qp->stats.dequeued_count += nb_ops;
/* Create and configure the cryptodev: allocates the device via the common
 * PMD helper, wires up ops/burst handlers and feature flags, then brings up
 * the MUSDK DMA region and the SAM engine. -EEXIST from the DMA init is
 * tolerated (another Marvell driver already initialized it).
 * NOTE(review): extract elides the 'int ret' declaration, NULL checks,
 * error-path goto/labels and braces.
 */
755 * Create a new crypto device.
757 * @param name Driver name.
758 * @param vdev Pointer to device structure.
759 * @param init_params Pointer to initialization parameters.
760 * @returns 0 in case of success, negative value otherwise.
763 cryptodev_mrvl_crypto_create(const char *name,
764 struct rte_vdev_device *vdev,
765 struct mrvl_pmd_init_params *init_params)
767 struct rte_cryptodev *dev;
768 struct mrvl_crypto_private *internals;
769 struct sam_init_params sam_params;
772 dev = rte_cryptodev_pmd_create(name, &vdev->device,
773 &init_params->common);
775 MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
779 dev->driver_id = cryptodev_driver_id;
780 dev->dev_ops = rte_mrvl_crypto_pmd_ops;
782 /* Register rx/tx burst functions for data path. */
783 dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
784 dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
786 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
787 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
788 RTE_CRYPTODEV_FF_HW_ACCELERATED |
789 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
790 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
792 /* Set vector instructions mode supported */
793 internals = dev->data->dev_private;
795 internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
796 internals->max_nb_sessions = init_params->max_nb_sessions;
799 * ret == -EEXIST is correct, it means DMA
800 * has been already initialized.
802 ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
807 MRVL_CRYPTO_LOG_INFO(
808 "DMA memory has been already initialized by a different driver.");
811 sam_params.max_num_sessions = internals->max_nb_sessions;
813 /* sam_set_debug_flags(3); */
814 return sam_init(&sam_params);
/* Error path: log, tear down whatever was created, and propagate ret. */
818 "driver %s: %s failed", init_params->common.name, __func__);
820 cryptodev_mrvl_crypto_uninit(vdev);
/* rte_kvargs_process() callback: parse a positive integer kvarg value into
 * *(int *)extra_args; negative values are rejected.
 * NOTE(review): extract elides the atoi/strtol conversion line and the
 * return statements.
 */
824 /** Parse integer from integer argument */
826 parse_integer_arg(const char *key __rte_unused,
827 const char *value, void *extra_args)
829 int *i = (int *) extra_args;
833 MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
/* rte_kvargs_process() callback: copy the device-name kvarg into
 * params->name. The preceding strlen() bound check guarantees the
 * strncpy() result is NUL-terminated.
 * NOTE(review): extract elides the return statements and closing brace.
 */
842 parse_name_arg(const char *key __rte_unused,
843 const char *value, void *extra_args)
845 struct rte_cryptodev_pmd_init_params *params = extra_args;
847 if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
848 MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
849 "%u bytes.\n", value,
850 RTE_CRYPTODEV_NAME_MAX_LEN - 1);
854 strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
/* Parse the vdev devargs string into 'params' using the kvargs helpers:
 * each recognized key is processed by its matching callback above.
 * NOTE(review): extract elides the 'int ret' declaration, NULL/error
 * checks after each rte_kvargs_process() call, some callback-argument
 * lines and the final return; the kvlist is always freed at the end.
 */
860 mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
861 const char *input_args)
863 struct rte_kvargs *kvlist = NULL;
870 kvlist = rte_kvargs_parse(input_args,
871 mrvl_pmd_valid_params);
875 /* Common VDEV parameters */
876 ret = rte_kvargs_process(kvlist,
877 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
879 &params->common.max_nb_queue_pairs);
883 ret = rte_kvargs_process(kvlist,
884 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
886 &params->common.socket_id);
890 ret = rte_kvargs_process(kvlist,
891 RTE_CRYPTODEV_PMD_NAME_ARG,
897 ret = rte_kvargs_process(kvlist,
898 MRVL_PMD_MAX_NB_SESS_ARG,
907 rte_kvargs_free(kvlist);
/* vdev probe handler: seed init_params with defaults (queue pairs derived
 * from the SAM instance/CIO count, current NUMA socket, default session
 * cap), override them from devargs, then create the device.
 * NOTE(review): extract elides the '.private_data_size' field name, a
 * 'int ret' declaration, NULL checks on name/args and closing braces.
 */
912 * Initialize the crypto device.
914 * @param vdev Pointer to device structure.
915 * @returns 0 in case of success, negative value otherwise.
918 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
920 struct mrvl_pmd_init_params init_params = {
924 sizeof(struct mrvl_crypto_private),
925 .max_nb_queue_pairs =
926 sam_get_num_inst() * sam_get_num_cios(0),
927 .socket_id = rte_socket_id()
929 .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
932 const char *name, *args;
935 name = rte_vdev_device_name(vdev);
938 args = rte_vdev_device_args(vdev);
940 ret = mrvl_pmd_parse_input_args(&init_params, args);
943 "Failed to parse initialisation arguments[%s]\n",
948 return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
/* vdev remove handler: look the device up by name and destroy it via the
 * common PMD helper.
 * NOTE(review): extract elides the name NULL-check and the -ENODEV return.
 */
952 * Uninitialize the crypto device
954 * @param vdev Pointer to device structure.
955 * @returns 0 in case of success, negative value otherwise.
958 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
960 struct rte_cryptodev *cryptodev;
961 const char *name = rte_vdev_device_name(vdev);
967 "Closing Marvell crypto device %s on numa socket %u\n",
968 name, rte_socket_id());
972 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
973 if (cryptodev == NULL)
976 return rte_cryptodev_pmd_destroy(cryptodev);
/* Driver registration: bind the probe/remove handlers to the vdev bus,
 * advertise the accepted devargs keys, and register with the cryptodev
 * framework so cryptodev_driver_id is assigned at constructor time.
 */
980 * Basic driver handlers for use in the constructor.
982 static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
983 .probe = cryptodev_mrvl_crypto_init,
984 .remove = cryptodev_mrvl_crypto_uninit
987 static struct cryptodev_driver mrvl_crypto_drv;
989 /* Register the driver in constructor. */
990 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
991 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
992 "max_nb_queue_pairs=<int> "
993 "max_nb_sessions=<int> "
995 RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
996 cryptodev_driver_id);