1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_kvargs.h>
16 #include "rte_mrvl_pmd_private.h"
/* Size (in bytes) of the contiguous DMA region requested from MUSDK (40 MiB). */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040

/* vdev argument key selecting the maximum number of sessions. */
#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
/* Session count used when the argument above is not supplied. */
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048

/* Driver id handed out by the cryptodev framework at registration time. */
static uint8_t cryptodev_driver_id;
/* PMD initialization parameters: the common cryptodev vdev parameters
 * plus the PMD-specific maximum session count.
 */
struct mrvl_pmd_init_params {
	struct rte_cryptodev_pmd_init_params common; /**< Common vdev params */
	uint32_t max_nb_sessions; /**< Session-table size handed to SAM */
/* kvargs keys accepted on the vdev command line. */
const char *mrvl_pmd_valid_params[] = {
	RTE_CRYPTODEV_PMD_NAME_ARG,
	RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
	RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
	MRVL_PMD_MAX_NB_SESS_ARG
/**
 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
 *
 * The idea is to have Not Supported value as default (0).
 * This way we need only to define proper map sizes,
 * non-initialized entries will be by default not supported.
 */
	ALGO_NOT_SUPPORTED = 0,
/** Map elements for cipher mapping.*/
struct cipher_params_mapping {
	enum algo_supported supported;    /**< On/Off switch */
	enum sam_cipher_alg cipher_alg;   /**< Cipher algorithm */
	enum sam_cipher_mode cipher_mode; /**< Cipher mode */
	unsigned int max_key_len;         /**< Maximum key length (in bytes)*/
/* We want to squeeze in multiple maps into the cache line. */
/** Map elements for auth mapping.*/
struct auth_params_mapping {
	enum algo_supported supported; /**< On/off switch */
	enum sam_auth_alg auth_alg;    /**< Auth algorithm */
/* We want to squeeze in multiple maps into the cache line. */
/**
 * Map of supported cipher algorithms.
 *
 * Indexed by DPDK cipher algo id; entries not listed here stay
 * zero-initialized, i.e. ALGO_NOT_SUPPORTED.
 */
struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_ECB] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_ECB,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_AES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(256) },
	[RTE_CRYPTO_CIPHER_AES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(256) },
/**
 * Map of supported auth algorithms.
 *
 * Indexed by DPDK auth algo id; entries not listed here stay
 * zero-initialized, i.e. ALGO_NOT_SUPPORTED.
 */
struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
	[RTE_CRYPTO_AUTH_MD5_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_MD5 },
	[RTE_CRYPTO_AUTH_MD5] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_MD5 },
	[RTE_CRYPTO_AUTH_SHA1_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA1 },
	[RTE_CRYPTO_AUTH_SHA1] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA1 },
	[RTE_CRYPTO_AUTH_SHA224] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_224 },
	[RTE_CRYPTO_AUTH_SHA256_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA256] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA384_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA384] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA512_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_512 },
	[RTE_CRYPTO_AUTH_SHA512] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_512 },
	[RTE_CRYPTO_AUTH_AES_GMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_AES_GMAC },
/**
 * Map of supported aead algorithms.
 *
 * AEAD reuses the cipher mapping struct; only AES-GCM is listed,
 * all other entries default to ALGO_NOT_SUPPORTED.
 */
struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
	[RTE_CRYPTO_AEAD_AES_GCM] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_GCM,
		.max_key_len = BITS2BYTES(256) },
/*
 *-----------------------------------------------------------------------------
 * Forward declarations.
 *-----------------------------------------------------------------------------
 */
static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
162 *-----------------------------------------------------------------------------
163 * Session Preparation.
164 *-----------------------------------------------------------------------------
168 * Get xform chain order.
170 * @param xform Pointer to configuration structure chain for crypto operations.
171 * @returns Order of crypto operations.
173 static enum mrvl_crypto_chain_order
174 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
176 /* Currently, Marvell supports max 2 operations in chain */
177 if (xform->next != NULL && xform->next->next != NULL)
178 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
180 if (xform->next != NULL) {
181 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
182 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
183 return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
185 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
186 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
187 return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
189 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
190 return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
192 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
193 return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
195 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
196 return MRVL_CRYPTO_CHAIN_COMBINED;
198 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
202 * Set session parameters for cipher part.
204 * @param sess Crypto session pointer.
205 * @param cipher_xform Pointer to configuration structure for cipher operations.
206 * @returns 0 in case of success, negative value otherwise.
209 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
210 const struct rte_crypto_sym_xform *cipher_xform)
212 /* Make sure we've got proper struct */
213 if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
214 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
218 /* See if map data is present and valid */
219 if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
220 (cipher_map[cipher_xform->cipher.algo].supported
221 != ALGO_SUPPORTED)) {
222 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
226 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
228 sess->sam_sess_params.dir =
229 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
230 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
231 sess->sam_sess_params.cipher_alg =
232 cipher_map[cipher_xform->cipher.algo].cipher_alg;
233 sess->sam_sess_params.cipher_mode =
234 cipher_map[cipher_xform->cipher.algo].cipher_mode;
236 /* Assume IV will be passed together with data. */
237 sess->sam_sess_params.cipher_iv = NULL;
239 /* Get max key length. */
240 if (cipher_xform->cipher.key.length >
241 cipher_map[cipher_xform->cipher.algo].max_key_len) {
242 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
246 sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
247 sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
253 * Set session parameters for authentication part.
255 * @param sess Crypto session pointer.
256 * @param auth_xform Pointer to configuration structure for auth operations.
257 * @returns 0 in case of success, negative value otherwise.
260 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
261 const struct rte_crypto_sym_xform *auth_xform)
263 /* Make sure we've got proper struct */
264 if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
265 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
269 /* See if map data is present and valid */
270 if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
271 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
272 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
276 sess->sam_sess_params.dir =
277 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
278 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
279 sess->sam_sess_params.auth_alg =
280 auth_map[auth_xform->auth.algo].auth_alg;
281 sess->sam_sess_params.u.basic.auth_icv_len =
282 auth_xform->auth.digest_length;
283 /* auth_key must be NULL if auth algorithm does not use HMAC */
284 sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
285 auth_xform->auth.key.data : NULL;
286 sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
292 * Set session parameters for aead part.
294 * @param sess Crypto session pointer.
295 * @param aead_xform Pointer to configuration structure for aead operations.
296 * @returns 0 in case of success, negative value otherwise.
299 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
300 const struct rte_crypto_sym_xform *aead_xform)
302 /* Make sure we've got proper struct */
303 if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
304 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
308 /* See if map data is present and valid */
309 if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
310 (aead_map[aead_xform->aead.algo].supported
311 != ALGO_SUPPORTED)) {
312 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
316 sess->sam_sess_params.dir =
317 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
318 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
319 sess->sam_sess_params.cipher_alg =
320 aead_map[aead_xform->aead.algo].cipher_alg;
321 sess->sam_sess_params.cipher_mode =
322 aead_map[aead_xform->aead.algo].cipher_mode;
324 /* Assume IV will be passed together with data. */
325 sess->sam_sess_params.cipher_iv = NULL;
327 /* Get max key length. */
328 if (aead_xform->aead.key.length >
329 aead_map[aead_xform->aead.algo].max_key_len) {
330 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
334 sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
335 sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
337 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
338 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
340 sess->sam_sess_params.u.basic.auth_icv_len =
341 aead_xform->aead.digest_length;
343 sess->sam_sess_params.u.basic.auth_aad_len =
344 aead_xform->aead.aad_length;
/**
 * Parse crypto transform chain and setup session parameters.
 *
 * @param sess Pointer to crypto session.
 * @param xform Pointer to configuration structure chain for crypto operations.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
		const struct rte_crypto_sym_xform *xform)
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;

	/* Filter out spurious/broken requests */
	/* Classify the chain, then pick which xform feeds each helper below. */
	sess->chain_order = mrvl_crypto_get_chain_order(xform);
	switch (sess->chain_order) {
	case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
		cipher_xform = xform;
		auth_xform = xform->next;
	case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
		/* Auth comes first in the chain; cipher is second. */
		cipher_xform = xform->next;
	case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
		cipher_xform = xform;
	case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
	case MRVL_CRYPTO_CHAIN_COMBINED:

	/* Each present xform is validated/applied by its dedicated helper. */
	if ((cipher_xform != NULL) &&
	    (mrvl_crypto_set_cipher_session_parameters(
		sess, cipher_xform) < 0)) {
		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");

	if ((auth_xform != NULL) &&
	    (mrvl_crypto_set_auth_session_parameters(
		sess, auth_xform) < 0)) {
		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");

	if ((aead_xform != NULL) &&
	    (mrvl_crypto_set_aead_session_parameters(
		sess, aead_xform) < 0)) {
		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
417 *-----------------------------------------------------------------------------
419 *-----------------------------------------------------------------------------
/**
 * Prepare a single request.
 *
 * This function basically translates DPDK crypto request into one
 * understandable by MUSDK's SAM. If this is a first request in a session,
 * it starts the session.
 *
 * @param request Pointer to pre-allocated && reset request buffer [Out].
 * @param src_bd Pointer to pre-allocated source descriptor [Out].
 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
 * @param op Pointer to DPDK crypto operation struct [In].
 */
mrvl_request_prepare(struct sam_cio_op_params *request,
		struct sam_buf_info *src_bd,
		struct sam_buf_info *dst_bd,
		struct rte_crypto_op *op)
	struct mrvl_crypto_session *sess;
	struct rte_mbuf *dst_mbuf;

	/* Only session-oriented ops are accepted by this PMD. */
	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
			"oriented requests, op (%p) is sessionless.",

	sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
		op->sym->session, cryptodev_driver_id);
	if (unlikely(sess == NULL)) {
		MRVL_CRYPTO_LOG_ERR("Session was not created for this device");

	/*
	 * If application delivered us null dst buffer, it means it expects
	 * us to deliver the result in src buffer.
	 */
	dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	request->sa = sess->sam_sess;
	request->cookie = op;

	/* Single buffers only, sorry. */
	request->num_bufs = 1;
	request->src = src_bd;
	src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
	src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
	src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);

	if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
		/* EIP does not support 0 length buffers. */
		MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");

	/* Empty destination. */
	if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
		/* Make dst buffer fit at least source data. */
		if (rte_pktmbuf_append(dst_mbuf,
			rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
			MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");

	request->dst = dst_bd;
	dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
	dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
	/*
	 * We can use all available space in dst_mbuf,
	 * not only what's used currently.
	 */
	dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);

	/* AEAD carries its lengths/offsets in op->sym->aead; others split
	 * into cipher/auth fields.
	 */
	if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
		request->cipher_len = op->sym->aead.data.length;
		request->cipher_offset = op->sym->aead.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->cipher_iv_offset);

		request->auth_aad = op->sym->aead.aad.data;
		request->auth_offset = request->cipher_offset;
		request->auth_len = request->cipher_len;
		request->cipher_len = op->sym->cipher.data.length;
		request->cipher_offset = op->sym->cipher.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->cipher_iv_offset);

		request->auth_offset = op->sym->auth.data.offset;
		request->auth_len = op->sym->auth.data.length;

	digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
		op->sym->aead.digest.data : op->sym->auth.digest.data;
	if (digest == NULL) {
		/* No auth - no worry. */

	/* ICV is expected immediately after the authenticated region. */
	request->auth_icv_offset = request->auth_offset + request->auth_len;

	/*
	 * EIP supports only scenarios where ICV(digest buffer) is placed at
	 * auth_icv_offset. Any other placement means risking errors.
	 */
	if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
		/*
		 * This should be the most common case anyway,
		 * EIP will overwrite DST buffer at auth_icv_offset.
		 */
		if (rte_pktmbuf_mtod_offset(
			request->auth_icv_offset) == digest) {
	} else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
		/*
		 * EIP will look for digest at auth_icv_offset
		 * offset in SRC buffer.
		 */
		if (rte_pktmbuf_mtod_offset(
			op->sym->m_src, uint8_t *,
			request->auth_icv_offset) == digest) {
	/*
	 * If we landed here it means that digest pointer is
	 * at different than expected place.
	 */
563 *-----------------------------------------------------------------------------
564 * PMD Framework handlers
565 *-----------------------------------------------------------------------------
/**
 * Burst enqueue: translate each op via mrvl_request_prepare() and push the
 * whole batch to the SAM CIO ring in one sam_cio_enq() call.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements consumed from ops.
 */
mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
	uint16_t iter_ops = 0;
	uint16_t consumed = 0;
	/* VLA sized by the burst; requests are zeroed before preparation. */
	struct sam_cio_op_params requests[nb_ops];
	/*
	 * DPDK uses single fragment buffers, so we can KISS descriptors.
	 * SAM does not store bd pointers, so on-stack scope will be enough.
	 */
	struct sam_buf_info src_bd[nb_ops];
	struct sam_buf_info dst_bd[nb_ops];
	struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;

	/* Prepare the burst. */
	memset(&requests, 0, sizeof(requests));

	/* Iterate through */
	for (; iter_ops < nb_ops; ++iter_ops) {
		if (mrvl_request_prepare(&requests[iter_ops],
			ops[iter_ops]) < 0) {
			"Error while parameters preparation!");
			qp->stats.enqueue_err_count++;
			ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
			/*
			 * Number of handled ops is increased
			 * (even if the result of handling is error).
			 */
			ops[iter_ops]->status =
				RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		/* Increase the number of ops to enqueue. */
	} /* for (; iter_ops < nb_ops;... */

	ret = sam_cio_enq(qp->cio, requests, &to_enq);
		/*
		 * Trust SAM that in this case returned value will be at
		 * some point correct (now it is returned unmodified).
		 */
		qp->stats.enqueue_err_count += to_enq;
		for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
			ops[iter_ops]->status =
				RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.enqueued_count += to_enq;
/**
 * Burst dequeue: pull completed operations from the SAM CIO ring and
 * translate each SAM result status into a DPDK op status.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements dequeued.
 */
mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops,
	struct mrvl_crypto_qp *qp = queue_pair;
	struct sam_cio *cio = qp->cio;
	struct sam_cio_op_result results[nb_ops];

	/* sam_cio_deq() updates nb_ops to the number actually dequeued. */
	ret = sam_cio_deq(cio, results, &nb_ops);
		/* Count all dequeued as error. */
		qp->stats.dequeue_err_count += nb_ops;

		/* But act as they were dequeued anyway*/
		qp->stats.dequeued_count += nb_ops;

	/* Unpack and check results. */
	for (i = 0; i < nb_ops; ++i) {
		/* The cookie stored at enqueue time is the original op. */
		ops[i] = results[i].cookie;

		switch (results[i].status) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		case SAM_CIO_ERR_ICV:
			MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
			ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			"CIO returned Error: %d", results[i].status);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.dequeued_count += nb_ops;
/**
 * Create a new crypto device.
 *
 * Registers the burst handlers and feature flags, initializes the MUSDK
 * DMA region and finally brings up the SAM library.
 *
 * @param name Driver name.
 * @param vdev Pointer to device structure.
 * @param init_params Pointer to initialization parameters.
 * @returns 0 in case of success, negative value otherwise.
 */
cryptodev_mrvl_crypto_create(const char *name,
		struct rte_vdev_device *vdev,
		struct mrvl_pmd_init_params *init_params)
	struct rte_cryptodev *dev;
	struct mrvl_crypto_private *internals;
	struct sam_init_params sam_params;

	dev = rte_cryptodev_pmd_create(name, &vdev->device,
			&init_params->common);
		MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_mrvl_crypto_pmd_ops;

	/* Register rx/tx burst functions for data path. */
	dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
	dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	/*
	 * ret == -EEXIST is correct, it means DMA
	 * has been already initialized.
	 */
	ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
		MRVL_CRYPTO_LOG_INFO(
			"DMA memory has been already initialized by a different driver.");

	sam_params.max_num_sessions = internals->max_nb_sessions;

	return sam_init(&sam_params);

	/* Error path: report failure and tear down whatever was created. */
		"driver %s: %s failed", init_params->common.name, __func__);

	cryptodev_mrvl_crypto_uninit(vdev);
/** Parse integer from integer argument */
parse_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
	int *i = (int *) extra_args;

	/* Negative values are rejected for all integer vdev arguments. */
	MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
/** Parse name argument into the common init params (kvargs callback). */
parse_name_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
	struct rte_cryptodev_pmd_init_params *params = extra_args;

	if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
			"%u bytes.\n", value,
			RTE_CRYPTODEV_NAME_MAX_LEN - 1);

	/* Safe: length was bounded above, so the copy is NUL-terminated. */
	strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
804 mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
805 const char *input_args)
807 struct rte_kvargs *kvlist = NULL;
814 kvlist = rte_kvargs_parse(input_args,
815 mrvl_pmd_valid_params);
819 /* Common VDEV parameters */
820 ret = rte_kvargs_process(kvlist,
821 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
823 ¶ms->common.max_nb_queue_pairs);
827 ret = rte_kvargs_process(kvlist,
828 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
830 ¶ms->common.socket_id);
834 ret = rte_kvargs_process(kvlist,
835 RTE_CRYPTODEV_PMD_NAME_ARG,
841 ret = rte_kvargs_process(kvlist,
842 MRVL_PMD_MAX_NB_SESS_ARG,
851 rte_kvargs_free(kvlist);
/**
 * Initialize the crypto device.
 *
 * Builds defaults (queue pairs sized from the SAM instance/CIO counts),
 * overrides them from the vdev argument string, then creates the device.
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
	struct mrvl_pmd_init_params init_params = {
			sizeof(struct mrvl_crypto_private),
		.max_nb_queue_pairs =
			sam_get_num_inst() * sam_get_num_cios(0),
		.socket_id = rte_socket_id()
		.max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS

	const char *name, *args;

	name = rte_vdev_device_name(vdev);

	args = rte_vdev_device_args(vdev);

	ret = mrvl_pmd_parse_input_args(&init_params, args);
		"Failed to parse initialisation arguments[%s]\n",

	return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
/**
 * Uninitialize the crypto device
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
	struct rte_cryptodev *cryptodev;
	const char *name = rte_vdev_device_name(vdev);

		"Closing Marvell crypto device %s on numa socket %u\n",
		name, rte_socket_id());

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)

	/* Destroys the framework-side device created in _create(). */
	return rte_cryptodev_pmd_destroy(cryptodev);
/**
 * Basic driver handlers for use in the constructor.
 */
static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
	.probe = cryptodev_mrvl_crypto_init,
	.remove = cryptodev_mrvl_crypto_uninit

/* Framework-side driver descriptor used by the registration macro below. */
static struct cryptodev_driver mrvl_crypto_drv;

/* Register the driver in constructor. */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
		cryptodev_driver_id);