1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_kvargs.h>
16 #include "rte_mrvl_pmd_private.h"
/* Size of the DMA memory region requested from MUSDK (40 MiB). */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040

/* Devargs key and default for the maximum number of sessions. */
#define MRVL_PMD_MAX_NB_SESS_ARG		("max_nb_sessions")
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS	2048

/* Driver id assigned by the cryptodev framework at registration time. */
static uint8_t cryptodev_driver_id;
25 struct mrvl_pmd_init_params {
26 struct rte_cryptodev_pmd_init_params common;
27 uint32_t max_nb_sessions;
30 const char *mrvl_pmd_valid_params[] = {
31 RTE_CRYPTODEV_PMD_NAME_ARG,
32 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
33 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
34 MRVL_PMD_MAX_NB_SESS_ARG
/**
 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
 *
 * The idea is to have Not Supported value as default (0).
 * This way we need only to define proper map sizes,
 * non-initialized entries will be by default not supported.
 */
enum algo_supported {
	ALGO_NOT_SUPPORTED = 0,
	ALGO_SUPPORTED = 1
};
49 /** Map elements for cipher mapping.*/
50 struct cipher_params_mapping {
51 enum algo_supported supported; /**< On/Off switch */
52 enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
53 enum sam_cipher_mode cipher_mode; /**< Cipher mode */
54 unsigned int max_key_len; /**< Maximum key length (in bytes)*/
56 /* We want to squeeze in multiple maps into the cache line. */
59 /** Map elements for auth mapping.*/
60 struct auth_params_mapping {
61 enum algo_supported supported; /**< On/off switch */
62 enum sam_auth_alg auth_alg; /**< Auth algorithm */
64 /* We want to squeeze in multiple maps into the cache line. */
68 * Map of supported cipher algorithms.
71 struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
72 [RTE_CRYPTO_CIPHER_3DES_CBC] = {
73 .supported = ALGO_SUPPORTED,
74 .cipher_alg = SAM_CIPHER_3DES,
75 .cipher_mode = SAM_CIPHER_CBC,
76 .max_key_len = BITS2BYTES(192) },
77 [RTE_CRYPTO_CIPHER_3DES_CTR] = {
78 .supported = ALGO_SUPPORTED,
79 .cipher_alg = SAM_CIPHER_3DES,
80 .cipher_mode = SAM_CIPHER_CTR,
81 .max_key_len = BITS2BYTES(192) },
82 [RTE_CRYPTO_CIPHER_3DES_ECB] = {
83 .supported = ALGO_SUPPORTED,
84 .cipher_alg = SAM_CIPHER_3DES,
85 .cipher_mode = SAM_CIPHER_ECB,
86 .max_key_len = BITS2BYTES(192) },
87 [RTE_CRYPTO_CIPHER_AES_CBC] = {
88 .supported = ALGO_SUPPORTED,
89 .cipher_alg = SAM_CIPHER_AES,
90 .cipher_mode = SAM_CIPHER_CBC,
91 .max_key_len = BITS2BYTES(256) },
92 [RTE_CRYPTO_CIPHER_AES_CTR] = {
93 .supported = ALGO_SUPPORTED,
94 .cipher_alg = SAM_CIPHER_AES,
95 .cipher_mode = SAM_CIPHER_CTR,
96 .max_key_len = BITS2BYTES(256) },
100 * Map of supported auth algorithms.
103 struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
104 [RTE_CRYPTO_AUTH_MD5_HMAC] = {
105 .supported = ALGO_SUPPORTED,
106 .auth_alg = SAM_AUTH_HMAC_MD5 },
107 [RTE_CRYPTO_AUTH_MD5] = {
108 .supported = ALGO_SUPPORTED,
109 .auth_alg = SAM_AUTH_HASH_MD5 },
110 [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
111 .supported = ALGO_SUPPORTED,
112 .auth_alg = SAM_AUTH_HMAC_SHA1 },
113 [RTE_CRYPTO_AUTH_SHA1] = {
114 .supported = ALGO_SUPPORTED,
115 .auth_alg = SAM_AUTH_HASH_SHA1 },
116 [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
117 .supported = ALGO_SUPPORTED,
118 .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
119 [RTE_CRYPTO_AUTH_SHA224] = {
120 .supported = ALGO_SUPPORTED,
121 .auth_alg = SAM_AUTH_HASH_SHA2_224 },
122 [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
123 .supported = ALGO_SUPPORTED,
124 .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
125 [RTE_CRYPTO_AUTH_SHA256] = {
126 .supported = ALGO_SUPPORTED,
127 .auth_alg = SAM_AUTH_HASH_SHA2_256 },
128 [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
129 .supported = ALGO_SUPPORTED,
130 .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
131 [RTE_CRYPTO_AUTH_SHA384] = {
132 .supported = ALGO_SUPPORTED,
133 .auth_alg = SAM_AUTH_HASH_SHA2_384 },
134 [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
135 .supported = ALGO_SUPPORTED,
136 .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
137 [RTE_CRYPTO_AUTH_SHA512] = {
138 .supported = ALGO_SUPPORTED,
139 .auth_alg = SAM_AUTH_HASH_SHA2_512 },
140 [RTE_CRYPTO_AUTH_AES_GMAC] = {
141 .supported = ALGO_SUPPORTED,
142 .auth_alg = SAM_AUTH_AES_GMAC },
146 * Map of supported aead algorithms.
149 struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
150 [RTE_CRYPTO_AEAD_AES_GCM] = {
151 .supported = ALGO_SUPPORTED,
152 .cipher_alg = SAM_CIPHER_AES,
153 .cipher_mode = SAM_CIPHER_GCM,
154 .max_key_len = BITS2BYTES(256) },
/*
 *-----------------------------------------------------------------------------
 * Forward declarations.
 *-----------------------------------------------------------------------------
 */
static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
165 *-----------------------------------------------------------------------------
166 * Session Preparation.
167 *-----------------------------------------------------------------------------
171 * Get xform chain order.
173 * @param xform Pointer to configuration structure chain for crypto operations.
174 * @returns Order of crypto operations.
176 static enum mrvl_crypto_chain_order
177 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
179 /* Currently, Marvell supports max 2 operations in chain */
180 if (xform->next != NULL && xform->next->next != NULL)
181 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
183 if (xform->next != NULL) {
184 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
185 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
186 return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
188 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
189 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
190 return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
192 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
193 return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
195 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
196 return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
198 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
199 return MRVL_CRYPTO_CHAIN_COMBINED;
201 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
205 * Set session parameters for cipher part.
207 * @param sess Crypto session pointer.
208 * @param cipher_xform Pointer to configuration structure for cipher operations.
209 * @returns 0 in case of success, negative value otherwise.
212 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
213 const struct rte_crypto_sym_xform *cipher_xform)
215 /* Make sure we've got proper struct */
216 if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
217 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
221 /* See if map data is present and valid */
222 if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
223 (cipher_map[cipher_xform->cipher.algo].supported
224 != ALGO_SUPPORTED)) {
225 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
229 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
231 sess->sam_sess_params.dir =
232 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
233 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
234 sess->sam_sess_params.cipher_alg =
235 cipher_map[cipher_xform->cipher.algo].cipher_alg;
236 sess->sam_sess_params.cipher_mode =
237 cipher_map[cipher_xform->cipher.algo].cipher_mode;
239 /* Assume IV will be passed together with data. */
240 sess->sam_sess_params.cipher_iv = NULL;
242 /* Get max key length. */
243 if (cipher_xform->cipher.key.length >
244 cipher_map[cipher_xform->cipher.algo].max_key_len) {
245 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
249 sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
250 sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
256 * Set session parameters for authentication part.
258 * @param sess Crypto session pointer.
259 * @param auth_xform Pointer to configuration structure for auth operations.
260 * @returns 0 in case of success, negative value otherwise.
263 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
264 const struct rte_crypto_sym_xform *auth_xform)
266 /* Make sure we've got proper struct */
267 if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
268 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
272 /* See if map data is present and valid */
273 if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
274 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
275 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
279 sess->sam_sess_params.dir =
280 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
281 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
282 sess->sam_sess_params.auth_alg =
283 auth_map[auth_xform->auth.algo].auth_alg;
284 sess->sam_sess_params.u.basic.auth_icv_len =
285 auth_xform->auth.digest_length;
286 /* auth_key must be NULL if auth algorithm does not use HMAC */
287 sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
288 auth_xform->auth.key.data : NULL;
289 sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
295 * Set session parameters for aead part.
297 * @param sess Crypto session pointer.
298 * @param aead_xform Pointer to configuration structure for aead operations.
299 * @returns 0 in case of success, negative value otherwise.
302 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
303 const struct rte_crypto_sym_xform *aead_xform)
305 /* Make sure we've got proper struct */
306 if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
307 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
311 /* See if map data is present and valid */
312 if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
313 (aead_map[aead_xform->aead.algo].supported
314 != ALGO_SUPPORTED)) {
315 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
319 sess->sam_sess_params.dir =
320 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
321 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
322 sess->sam_sess_params.cipher_alg =
323 aead_map[aead_xform->aead.algo].cipher_alg;
324 sess->sam_sess_params.cipher_mode =
325 aead_map[aead_xform->aead.algo].cipher_mode;
327 /* Assume IV will be passed together with data. */
328 sess->sam_sess_params.cipher_iv = NULL;
330 /* Get max key length. */
331 if (aead_xform->aead.key.length >
332 aead_map[aead_xform->aead.algo].max_key_len) {
333 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
337 sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
338 sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
340 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
341 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
343 sess->sam_sess_params.u.basic.auth_icv_len =
344 aead_xform->aead.digest_length;
346 sess->sam_sess_params.u.basic.auth_aad_len =
347 aead_xform->aead.aad_length;
353 * Parse crypto transform chain and setup session parameters.
355 * @param dev Pointer to crypto device
356 * @param sess Poiner to crypto session
357 * @param xform Pointer to configuration structure chain for crypto operations.
358 * @returns 0 in case of success, negative value otherwise.
361 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
362 const struct rte_crypto_sym_xform *xform)
364 const struct rte_crypto_sym_xform *cipher_xform = NULL;
365 const struct rte_crypto_sym_xform *auth_xform = NULL;
366 const struct rte_crypto_sym_xform *aead_xform = NULL;
368 /* Filter out spurious/broken requests */
372 sess->chain_order = mrvl_crypto_get_chain_order(xform);
373 switch (sess->chain_order) {
374 case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
375 cipher_xform = xform;
376 auth_xform = xform->next;
378 case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
380 cipher_xform = xform->next;
382 case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
383 cipher_xform = xform;
385 case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
388 case MRVL_CRYPTO_CHAIN_COMBINED:
395 if ((cipher_xform != NULL) &&
396 (mrvl_crypto_set_cipher_session_parameters(
397 sess, cipher_xform) < 0)) {
398 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
402 if ((auth_xform != NULL) &&
403 (mrvl_crypto_set_auth_session_parameters(
404 sess, auth_xform) < 0)) {
405 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
409 if ((aead_xform != NULL) &&
410 (mrvl_crypto_set_aead_session_parameters(
411 sess, aead_xform) < 0)) {
412 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
420 *-----------------------------------------------------------------------------
422 *-----------------------------------------------------------------------------
426 * Prepare a single request.
428 * This function basically translates DPDK crypto request into one
429 * understandable by MUDSK's SAM. If this is a first request in a session,
430 * it starts the session.
432 * @param request Pointer to pre-allocated && reset request buffer [Out].
433 * @param src_bd Pointer to pre-allocated source descriptor [Out].
434 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
435 * @param op Pointer to DPDK crypto operation struct [In].
438 mrvl_request_prepare(struct sam_cio_op_params *request,
439 struct sam_buf_info *src_bd,
440 struct sam_buf_info *dst_bd,
441 struct rte_crypto_op *op)
443 struct mrvl_crypto_session *sess;
444 struct rte_mbuf *dst_mbuf;
447 if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
448 MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
449 "oriented requests, op (%p) is sessionless.",
454 sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
455 op->sym->session, cryptodev_driver_id);
456 if (unlikely(sess == NULL)) {
457 MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
462 * If application delivered us null dst buffer, it means it expects
463 * us to deliver the result in src buffer.
465 dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
467 request->sa = sess->sam_sess;
468 request->cookie = op;
470 /* Single buffers only, sorry. */
471 request->num_bufs = 1;
472 request->src = src_bd;
473 src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
474 src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
475 src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
478 if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
479 /* EIP does not support 0 length buffers. */
480 MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
484 /* Empty destination. */
485 if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
486 /* Make dst buffer fit at least source data. */
487 if (rte_pktmbuf_append(dst_mbuf,
488 rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
489 MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
494 request->dst = dst_bd;
495 dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
496 dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
499 * We can use all available space in dst_mbuf,
500 * not only what's used currently.
502 dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
504 if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
505 request->cipher_len = op->sym->aead.data.length;
506 request->cipher_offset = op->sym->aead.data.offset;
507 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
508 sess->cipher_iv_offset);
510 request->auth_aad = op->sym->aead.aad.data;
511 request->auth_offset = request->cipher_offset;
512 request->auth_len = request->cipher_len;
514 request->cipher_len = op->sym->cipher.data.length;
515 request->cipher_offset = op->sym->cipher.data.offset;
516 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
517 sess->cipher_iv_offset);
519 request->auth_offset = op->sym->auth.data.offset;
520 request->auth_len = op->sym->auth.data.length;
523 digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
524 op->sym->aead.digest.data : op->sym->auth.digest.data;
525 if (digest == NULL) {
526 /* No auth - no worry. */
530 request->auth_icv_offset = request->auth_offset + request->auth_len;
533 * EIP supports only scenarios where ICV(digest buffer) is placed at
534 * auth_icv_offset. Any other placement means risking errors.
536 if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
538 * This should be the most common case anyway,
539 * EIP will overwrite DST buffer at auth_icv_offset.
541 if (rte_pktmbuf_mtod_offset(
543 request->auth_icv_offset) == digest) {
546 } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
548 * EIP will look for digest at auth_icv_offset
549 * offset in SRC buffer.
551 if (rte_pktmbuf_mtod_offset(
552 op->sym->m_src, uint8_t *,
553 request->auth_icv_offset) == digest) {
559 * If we landed here it means that digest pointer is
560 * at different than expected place.
566 *-----------------------------------------------------------------------------
567 * PMD Framework handlers
568 *-----------------------------------------------------------------------------
574 * @param queue_pair Pointer to queue pair.
575 * @param ops Pointer to ops requests array.
576 * @param nb_ops Number of elements in ops requests array.
577 * @returns Number of elements consumed from ops.
580 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
583 uint16_t iter_ops = 0;
585 uint16_t consumed = 0;
587 struct sam_cio_op_params requests[nb_ops];
589 * DPDK uses single fragment buffers, so we can KISS descriptors.
590 * SAM does not store bd pointers, so on-stack scope will be enough.
592 struct sam_buf_info src_bd[nb_ops];
593 struct sam_buf_info dst_bd[nb_ops];
594 struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
599 /* Prepare the burst. */
600 memset(&requests, 0, sizeof(requests));
602 /* Iterate through */
603 for (; iter_ops < nb_ops; ++iter_ops) {
604 if (mrvl_request_prepare(&requests[iter_ops],
607 ops[iter_ops]) < 0) {
609 "Error while parameters preparation!");
610 qp->stats.enqueue_err_count++;
611 ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
614 * Number of handled ops is increased
615 * (even if the result of handling is error).
621 ops[iter_ops]->status =
622 RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
624 /* Increase the number of ops to enqueue. */
626 } /* for (; iter_ops < nb_ops;... */
630 ret = sam_cio_enq(qp->cio, requests, &to_enq);
634 * Trust SAM that in this case returned value will be at
635 * some point correct (now it is returned unmodified).
637 qp->stats.enqueue_err_count += to_enq;
638 for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
639 ops[iter_ops]->status =
640 RTE_CRYPTO_OP_STATUS_ERROR;
644 qp->stats.enqueued_count += to_enq;
651 * @param queue_pair Pointer to queue pair.
652 * @param ops Pointer to ops requests array.
653 * @param nb_ops Number of elements in ops requests array.
654 * @returns Number of elements dequeued.
657 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
658 struct rte_crypto_op **ops,
662 struct mrvl_crypto_qp *qp = queue_pair;
663 struct sam_cio *cio = qp->cio;
664 struct sam_cio_op_result results[nb_ops];
667 ret = sam_cio_deq(cio, results, &nb_ops);
669 /* Count all dequeued as error. */
670 qp->stats.dequeue_err_count += nb_ops;
672 /* But act as they were dequeued anyway*/
673 qp->stats.dequeued_count += nb_ops;
678 /* Unpack and check results. */
679 for (i = 0; i < nb_ops; ++i) {
680 ops[i] = results[i].cookie;
682 switch (results[i].status) {
684 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
686 case SAM_CIO_ERR_ICV:
687 MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
688 ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
692 "CIO returned Error: %d", results[i].status);
693 ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
698 qp->stats.dequeued_count += nb_ops;
703 * Create a new crypto device.
705 * @param name Driver name.
706 * @param vdev Pointer to device structure.
707 * @param init_params Pointer to initialization parameters.
708 * @returns 0 in case of success, negative value otherwise.
711 cryptodev_mrvl_crypto_create(const char *name,
712 struct rte_vdev_device *vdev,
713 struct mrvl_pmd_init_params *init_params)
715 struct rte_cryptodev *dev;
716 struct mrvl_crypto_private *internals;
717 struct sam_init_params sam_params;
720 dev = rte_cryptodev_pmd_create(name, &vdev->device,
721 &init_params->common);
723 MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
727 dev->driver_id = cryptodev_driver_id;
728 dev->dev_ops = rte_mrvl_crypto_pmd_ops;
730 /* Register rx/tx burst functions for data path. */
731 dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
732 dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
734 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
735 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
736 RTE_CRYPTODEV_FF_HW_ACCELERATED |
737 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
738 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
740 /* Set vector instructions mode supported */
741 internals = dev->data->dev_private;
743 internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
744 internals->max_nb_sessions = init_params->max_nb_sessions;
747 * ret == -EEXIST is correct, it means DMA
748 * has been already initialized.
750 ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
755 MRVL_CRYPTO_LOG_INFO(
756 "DMA memory has been already initialized by a different driver.");
759 sam_params.max_num_sessions = internals->max_nb_sessions;
761 return sam_init(&sam_params);
765 "driver %s: %s failed", init_params->common.name, __func__);
767 cryptodev_mrvl_crypto_uninit(vdev);
771 /** Parse integer from integer argument */
773 parse_integer_arg(const char *key __rte_unused,
774 const char *value, void *extra_args)
776 int *i = (int *) extra_args;
780 MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
789 parse_name_arg(const char *key __rte_unused,
790 const char *value, void *extra_args)
792 struct rte_cryptodev_pmd_init_params *params = extra_args;
794 if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
795 MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
796 "%u bytes.\n", value,
797 RTE_CRYPTODEV_NAME_MAX_LEN - 1);
801 strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
807 mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
808 const char *input_args)
810 struct rte_kvargs *kvlist = NULL;
817 kvlist = rte_kvargs_parse(input_args,
818 mrvl_pmd_valid_params);
822 /* Common VDEV parameters */
823 ret = rte_kvargs_process(kvlist,
824 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
826 ¶ms->common.max_nb_queue_pairs);
830 ret = rte_kvargs_process(kvlist,
831 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
833 ¶ms->common.socket_id);
837 ret = rte_kvargs_process(kvlist,
838 RTE_CRYPTODEV_PMD_NAME_ARG,
844 ret = rte_kvargs_process(kvlist,
845 MRVL_PMD_MAX_NB_SESS_ARG,
854 rte_kvargs_free(kvlist);
859 * Initialize the crypto device.
861 * @param vdev Pointer to device structure.
862 * @returns 0 in case of success, negative value otherwise.
865 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
867 struct mrvl_pmd_init_params init_params = {
871 sizeof(struct mrvl_crypto_private),
872 .max_nb_queue_pairs =
873 sam_get_num_inst() * sam_get_num_cios(0),
874 .socket_id = rte_socket_id()
876 .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
879 const char *name, *args;
882 name = rte_vdev_device_name(vdev);
885 args = rte_vdev_device_args(vdev);
887 ret = mrvl_pmd_parse_input_args(&init_params, args);
890 "Failed to parse initialisation arguments[%s]\n",
895 return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
899 * Uninitialize the crypto device
901 * @param vdev Pointer to device structure.
902 * @returns 0 in case of success, negative value otherwise.
905 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
907 struct rte_cryptodev *cryptodev;
908 const char *name = rte_vdev_device_name(vdev);
914 "Closing Marvell crypto device %s on numa socket %u\n",
915 name, rte_socket_id());
919 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
920 if (cryptodev == NULL)
923 return rte_cryptodev_pmd_destroy(cryptodev);
927 * Basic driver handlers for use in the constructor.
929 static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
930 .probe = cryptodev_mrvl_crypto_init,
931 .remove = cryptodev_mrvl_crypto_uninit
934 static struct cryptodev_driver mrvl_crypto_drv;
936 /* Register the driver in constructor. */
937 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
938 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
939 "max_nb_queue_pairs=<int> "
940 "max_nb_sessions=<int> "
942 RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
943 cryptodev_driver_id);