4 * Copyright (C) Semihalf 2017. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Semihalf nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <rte_common.h>
34 #include <rte_hexdump.h>
35 #include <rte_cryptodev.h>
36 #include <rte_cryptodev_pmd.h>
37 #include <rte_cryptodev_vdev.h>
39 #include <rte_malloc.h>
40 #include <rte_cpuflags.h>
42 #include "rte_mrvl_pmd_private.h"
/* Size of the MUSDK DMA memory area to reserve: 41943040 B == 40 MiB. */
44 #define MRVL_MUSDK_DMA_MEMSIZE 41943040
/* Driver id; filled in by RTE_PMD_REGISTER_CRYPTO_DRIVER at the bottom of the file. */
46 static uint8_t cryptodev_driver_id;
49 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
51 * The idea is to have Not Supported value as default (0).
52 * This way we need only to define proper map sizes,
53 * non-initialized entries will be by default not supported.
/* Must stay 0 so that designated-initializer gaps in the maps below mean "not supported". */
56 ALGO_NOT_SUPPORTED = 0,
60 /** Map elements for cipher mapping.*/
61 struct cipher_params_mapping {
62 enum algo_supported supported; /**< On/Off switch */
63 enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
64 enum sam_cipher_mode cipher_mode; /**< Cipher mode */
65 unsigned int max_key_len; /**< Maximum key length (in bytes)*/
67 /* We want to squeeze in multiple maps into the cache line. */
/* Same rationale as above: entries are small so map lookups stay cache-friendly. */
70 /** Map elements for auth mapping.*/
71 struct auth_params_mapping {
72 enum algo_supported supported; /**< On/off switch */
73 enum sam_auth_alg auth_alg; /**< Auth algorithm */
75 /* We want to squeeze in multiple maps into the cache line. */
79 * Map of supported cipher algorithms.
/*
 * Indexed by enum rte_crypto_cipher_algorithm; any algorithm not listed
 * here is ALGO_NOT_SUPPORTED (0) by default. Key lengths are expressed
 * in bits and converted with BITS2BYTES (3DES: 192 bits, AES: up to 256).
 */
82 struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
83 [RTE_CRYPTO_CIPHER_3DES_CBC] = {
84 .supported = ALGO_SUPPORTED,
85 .cipher_alg = SAM_CIPHER_3DES,
86 .cipher_mode = SAM_CIPHER_CBC,
87 .max_key_len = BITS2BYTES(192) },
88 [RTE_CRYPTO_CIPHER_3DES_CTR] = {
89 .supported = ALGO_SUPPORTED,
90 .cipher_alg = SAM_CIPHER_3DES,
91 .cipher_mode = SAM_CIPHER_CTR,
92 .max_key_len = BITS2BYTES(192) },
93 [RTE_CRYPTO_CIPHER_3DES_ECB] = {
94 .supported = ALGO_SUPPORTED,
95 .cipher_alg = SAM_CIPHER_3DES,
96 .cipher_mode = SAM_CIPHER_ECB,
97 .max_key_len = BITS2BYTES(192) },
98 [RTE_CRYPTO_CIPHER_AES_CBC] = {
99 .supported = ALGO_SUPPORTED,
100 .cipher_alg = SAM_CIPHER_AES,
101 .cipher_mode = SAM_CIPHER_CBC,
102 .max_key_len = BITS2BYTES(256) },
103 [RTE_CRYPTO_CIPHER_AES_CTR] = {
104 .supported = ALGO_SUPPORTED,
105 .cipher_alg = SAM_CIPHER_AES,
106 .cipher_mode = SAM_CIPHER_CTR,
107 .max_key_len = BITS2BYTES(256) },
111 * Map of supported auth algorithms.
/*
 * Indexed by enum rte_crypto_auth_algorithm; unlisted algorithms default
 * to ALGO_NOT_SUPPORTED. NOTE(review): SHA224-HMAC has no entry while
 * plain SHA224 does — confirm this omission is intentional in MUSDK.
 */
114 struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
115 [RTE_CRYPTO_AUTH_MD5_HMAC] = {
116 .supported = ALGO_SUPPORTED,
117 .auth_alg = SAM_AUTH_HMAC_MD5 },
118 [RTE_CRYPTO_AUTH_MD5] = {
119 .supported = ALGO_SUPPORTED,
120 .auth_alg = SAM_AUTH_HASH_MD5 },
121 [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
122 .supported = ALGO_SUPPORTED,
123 .auth_alg = SAM_AUTH_HMAC_SHA1 },
124 [RTE_CRYPTO_AUTH_SHA1] = {
125 .supported = ALGO_SUPPORTED,
126 .auth_alg = SAM_AUTH_HASH_SHA1 },
127 [RTE_CRYPTO_AUTH_SHA224] = {
128 .supported = ALGO_SUPPORTED,
129 .auth_alg = SAM_AUTH_HASH_SHA2_224 },
130 [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
131 .supported = ALGO_SUPPORTED,
132 .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
133 [RTE_CRYPTO_AUTH_SHA256] = {
134 .supported = ALGO_SUPPORTED,
135 .auth_alg = SAM_AUTH_HASH_SHA2_256 },
136 [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
137 .supported = ALGO_SUPPORTED,
138 .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
139 [RTE_CRYPTO_AUTH_SHA384] = {
140 .supported = ALGO_SUPPORTED,
141 .auth_alg = SAM_AUTH_HASH_SHA2_384 },
142 [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
143 .supported = ALGO_SUPPORTED,
144 .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
145 [RTE_CRYPTO_AUTH_SHA512] = {
146 .supported = ALGO_SUPPORTED,
147 .auth_alg = SAM_AUTH_HASH_SHA2_512 },
148 [RTE_CRYPTO_AUTH_AES_GMAC] = {
149 .supported = ALGO_SUPPORTED,
150 .auth_alg = SAM_AUTH_AES_GMAC },
154 * Map of supported aead algorithms.
/*
 * Reuses cipher_params_mapping since AEAD here is cipher+hash in one pass;
 * only AES-GCM is supported. Indexed by enum rte_crypto_aead_algorithm.
 */
157 struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
158 [RTE_CRYPTO_AEAD_AES_GCM] = {
159 .supported = ALGO_SUPPORTED,
160 .cipher_alg = SAM_CIPHER_AES,
161 .cipher_mode = SAM_CIPHER_GCM,
162 .max_key_len = BITS2BYTES(256) },
166 *-----------------------------------------------------------------------------
167 * Forward declarations.
168 *-----------------------------------------------------------------------------
/* Declared early so cryptodev_mrvl_crypto_create() can roll back a failed init. */
170 static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
173 *-----------------------------------------------------------------------------
174 * Session Preparation.
175 *-----------------------------------------------------------------------------
179 * Get xform chain order.
181 * @param xform Pointer to configuration structure chain for crypto operations.
182 * @returns Order of crypto operations.
184 static enum mrvl_crypto_chain_order
185 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
187 /* Currently, Marvell supports max 2 operations in chain */
188 if (xform->next != NULL && xform->next->next != NULL)
189 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
191 if (xform->next != NULL) {
192 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
193 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
194 return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
196 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
197 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
198 return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
200 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
201 return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
203 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
204 return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
206 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
207 return MRVL_CRYPTO_CHAIN_COMBINED;
209 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
213 * Set session parameters for cipher part.
215 * @param sess Crypto session pointer.
216 * @param cipher_xform Pointer to configuration structure for cipher operations.
217 * @returns 0 in case of success, negative value otherwise.
220 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
221 const struct rte_crypto_sym_xform *cipher_xform)
223 /* Make sure we've got proper struct */
224 if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
225 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
229 /* See if map data is present and valid */
230 if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
231 (cipher_map[cipher_xform->cipher.algo].supported
232 != ALGO_SUPPORTED)) {
233 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
237 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
239 sess->sam_sess_params.dir =
240 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
241 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
242 sess->sam_sess_params.cipher_alg =
243 cipher_map[cipher_xform->cipher.algo].cipher_alg;
244 sess->sam_sess_params.cipher_mode =
245 cipher_map[cipher_xform->cipher.algo].cipher_mode;
247 /* Assume IV will be passed together with data. */
248 sess->sam_sess_params.cipher_iv = NULL;
250 /* Get max key length. */
251 if (cipher_xform->cipher.key.length >
252 cipher_map[cipher_xform->cipher.algo].max_key_len) {
253 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
257 sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
258 sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
264 * Set session parameters for authentication part.
266 * @param sess Crypto session pointer.
267 * @param auth_xform Pointer to configuration structure for auth operations.
268 * @returns 0 in case of success, negative value otherwise.
271 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
272 const struct rte_crypto_sym_xform *auth_xform)
274 /* Make sure we've got proper struct */
275 if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
276 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
280 /* See if map data is present and valid */
281 if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
282 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
283 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
287 sess->sam_sess_params.dir =
288 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
289 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
290 sess->sam_sess_params.auth_alg =
291 auth_map[auth_xform->auth.algo].auth_alg;
292 sess->sam_sess_params.u.basic.auth_icv_len =
293 auth_xform->auth.digest_length;
294 /* auth_key must be NULL if auth algorithm does not use HMAC */
295 sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
296 auth_xform->auth.key.data : NULL;
297 sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
303 * Set session parameters for aead part.
305 * @param sess Crypto session pointer.
306 * @param aead_xform Pointer to configuration structure for aead operations.
307 * @returns 0 in case of success, negative value otherwise.
310 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
311 const struct rte_crypto_sym_xform *aead_xform)
313 /* Make sure we've got proper struct */
314 if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
315 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
319 /* See if map data is present and valid */
320 if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
321 (aead_map[aead_xform->aead.algo].supported
322 != ALGO_SUPPORTED)) {
323 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
327 sess->sam_sess_params.dir =
328 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
329 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
330 sess->sam_sess_params.cipher_alg =
331 aead_map[aead_xform->aead.algo].cipher_alg;
332 sess->sam_sess_params.cipher_mode =
333 aead_map[aead_xform->aead.algo].cipher_mode;
335 /* Assume IV will be passed together with data. */
336 sess->sam_sess_params.cipher_iv = NULL;
338 /* Get max key length. */
339 if (aead_xform->aead.key.length >
340 aead_map[aead_xform->aead.algo].max_key_len) {
341 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
345 sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
346 sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
348 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
349 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
351 sess->sam_sess_params.u.basic.auth_icv_len =
352 aead_xform->aead.digest_length;
354 sess->sam_sess_params.u.basic.auth_aad_len =
355 aead_xform->aead.aad_length;
361 * Parse crypto transform chain and setup session parameters.
363 * @param dev Pointer to crypto device
364 * @param sess Poiner to crypto session
365 * @param xform Pointer to configuration structure chain for crypto operations.
366 * @returns 0 in case of success, negative value otherwise.
369 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
370 const struct rte_crypto_sym_xform *xform)
372 const struct rte_crypto_sym_xform *cipher_xform = NULL;
373 const struct rte_crypto_sym_xform *auth_xform = NULL;
374 const struct rte_crypto_sym_xform *aead_xform = NULL;
376 /* Filter out spurious/broken requests */
380 sess->chain_order = mrvl_crypto_get_chain_order(xform);
381 switch (sess->chain_order) {
382 case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
383 cipher_xform = xform;
384 auth_xform = xform->next;
386 case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
388 cipher_xform = xform->next;
390 case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
391 cipher_xform = xform;
393 case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
396 case MRVL_CRYPTO_CHAIN_COMBINED:
403 if ((cipher_xform != NULL) &&
404 (mrvl_crypto_set_cipher_session_parameters(
405 sess, cipher_xform) < 0)) {
406 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
410 if ((auth_xform != NULL) &&
411 (mrvl_crypto_set_auth_session_parameters(
412 sess, auth_xform) < 0)) {
413 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
417 if ((aead_xform != NULL) &&
418 (mrvl_crypto_set_aead_session_parameters(
419 sess, aead_xform) < 0)) {
420 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
428 *-----------------------------------------------------------------------------
430 *-----------------------------------------------------------------------------
434 * Prepare a single request.
436 * This function basically translates DPDK crypto request into one
437 * understandable by MUSDK's SAM. If this is a first request in a session,
438 * it starts the session.
440 * @param request Pointer to pre-allocated && reset request buffer [Out].
441 * @param src_bd Pointer to pre-allocated source descriptor [Out].
442 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
443 * @param op Pointer to DPDK crypto operation struct [In].
446 mrvl_request_prepare(struct sam_cio_op_params *request,
447 struct sam_buf_info *src_bd,
448 struct sam_buf_info *dst_bd,
449 struct rte_crypto_op *op)
451 struct mrvl_crypto_session *sess;
452 struct rte_mbuf *dst_mbuf;
/* Only session-based operation is supported; sessionless ops are rejected. */
455 if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
456 MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
457 "oriented requests, op (%p) is sessionless.",
/* Look up the session created earlier for this driver id. */
462 sess = (struct mrvl_crypto_session *)get_session_private_data(
463 op->sym->session, cryptodev_driver_id);
464 if (unlikely(sess == NULL)) {
465 MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
470 * If application delivered us null dst buffer, it means it expects
471 * us to deliver the result in src buffer.
473 dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
475 request->sa = sess->sam_sess;
476 request->cookie = op;
478 /* Single buffers only, sorry. */
479 request->num_bufs = 1;
480 request->src = src_bd;
481 src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
482 src_bd->paddr = rte_pktmbuf_mtophys(op->sym->m_src);
483 src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
486 if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
487 /* EIP does not support 0 length buffers. */
488 MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
492 /* Empty destination. */
493 if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
494 /* Make dst buffer fit at least source data. */
495 if (rte_pktmbuf_append(dst_mbuf,
496 rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
497 MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
502 request->dst = dst_bd;
503 dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
504 dst_bd->paddr = rte_pktmbuf_mtophys(dst_mbuf);
507 * We can use all available space in dst_mbuf,
508 * not only what's used currently.
510 dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
/* AEAD (COMBINED) carries one offset/length pair for both cipher and auth. */
512 if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
513 request->cipher_len = op->sym->aead.data.length;
514 request->cipher_offset = op->sym->aead.data.offset;
515 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
516 sess->cipher_iv_offset);
518 request->auth_aad = op->sym->aead.aad.data;
519 request->auth_offset = request->cipher_offset;
520 request->auth_len = request->cipher_len;
522 request->cipher_len = op->sym->cipher.data.length;
523 request->cipher_offset = op->sym->cipher.data.offset;
524 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
525 sess->cipher_iv_offset);
527 request->auth_offset = op->sym->auth.data.offset;
528 request->auth_len = op->sym->auth.data.length;
/* AEAD ops carry the digest in aead.digest, others in auth.digest. */
531 digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
532 op->sym->aead.digest.data : op->sym->auth.digest.data;
533 if (digest == NULL) {
534 /* No auth - no worry. */
/* EIP expects the ICV immediately after the authenticated region. */
538 request->auth_icv_offset = request->auth_offset + request->auth_len;
541 * EIP supports only scenarios where ICV(digest buffer) is placed at
542 * auth_icv_offset. Any other placement means risking errors.
544 if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
546 * This should be the most common case anyway,
547 * EIP will overwrite DST buffer at auth_icv_offset.
549 if (rte_pktmbuf_mtod_offset(
551 request->auth_icv_offset) == digest) {
554 } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
556 * EIP will look for digest at auth_icv_offset
557 * offset in SRC buffer.
559 if (rte_pktmbuf_mtod_offset(
560 op->sym->m_src, uint8_t *,
561 request->auth_icv_offset) == digest) {
567 * If we landed here it means that digest pointer is
568 * at different than expected place.
574 *-----------------------------------------------------------------------------
575 * PMD Framework handlers
576 *-----------------------------------------------------------------------------
582 * @param queue_pair Pointer to queue pair.
583 * @param ops Pointer to ops requests array.
584 * @param nb_ops Number of elements in ops requests array.
585 * @returns Number of elements consumed from ops.
588 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
591 uint16_t iter_ops = 0;
593 uint16_t consumed = 0;
/* VLAs: one request and one src/dst descriptor pair per op in the burst. */
595 struct sam_cio_op_params requests[nb_ops];
597 * DPDK uses single fragment buffers, so we can KISS descriptors.
598 * SAM does not store bd pointers, so on-stack scope will be enough.
600 struct sam_buf_info src_bd[nb_ops];
601 struct sam_buf_info dst_bd[nb_ops];
602 struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
607 /* Prepare the burst. */
608 memset(&requests, 0, sizeof(requests));
610 /* Iterate through */
611 for (; iter_ops < nb_ops; ++iter_ops) {
612 if (mrvl_request_prepare(&requests[iter_ops],
615 ops[iter_ops]) < 0) {
617 "Error while parameters preparation!");
618 qp->stats.enqueue_err_count++;
619 ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
622 * Number of handled ops is increased
623 * (even if the result of handling is error).
629 ops[iter_ops]->status =
630 RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
632 /* Increase the number of ops to enqueue. */
634 } /* for (; iter_ops < nb_ops;... */
/* Hand the prepared requests to the SAM crypto I/O queue in one call. */
638 ret = sam_cio_enq(qp->cio, requests, &to_enq);
642 * Trust SAM that in this case returned value will be at
643 * some point correct (now it is returned unmodified).
/* On enqueue failure, mark the whole prepared batch as errored. */
645 qp->stats.enqueue_err_count += to_enq;
646 for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
647 ops[iter_ops]->status =
648 RTE_CRYPTO_OP_STATUS_ERROR;
652 qp->stats.enqueued_count += to_enq;
659 * @param queue_pair Pointer to queue pair.
660 * @param ops Pointer to ops requests array.
661 * @param nb_ops Number of elements in ops requests array.
662 * @returns Number of elements dequeued.
665 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
666 struct rte_crypto_op **ops,
670 struct mrvl_crypto_qp *qp = queue_pair;
671 struct sam_cio *cio = qp->cio;
672 struct sam_cio_op_result results[nb_ops];
/* sam_cio_deq updates nb_ops in place with the number actually dequeued. */
675 ret = sam_cio_deq(cio, results, &nb_ops);
677 /* Count all dequeued as errors. */
678 qp->stats.dequeue_err_count += nb_ops;
680 /* But act as if they were dequeued anyway. */
681 qp->stats.dequeued_count += nb_ops;
686 /* Unpack and check results. */
687 for (i = 0; i < nb_ops; ++i) {
/* The cookie was set to the originating rte_crypto_op in request prepare. */
688 ops[i] = results[i].cookie;
690 switch (results[i].status) {
692 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
694 case SAM_CIO_ERR_ICV:
695 MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
696 ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
700 "CIO returned Error: %d", results[i].status);
701 ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
706 qp->stats.dequeued_count += nb_ops;
711 * Create a new crypto device.
713 * @param name Driver name.
714 * @param vdev Pointer to device structure.
715 * @param init_params Pointer to initialization parameters.
716 * @returns 0 in case of success, negative value otherwise.
719 cryptodev_mrvl_crypto_create(const char *name,
720 struct rte_vdev_device *vdev,
721 struct rte_crypto_vdev_init_params *init_params)
723 struct rte_cryptodev *dev;
724 struct mrvl_crypto_private *internals;
725 struct sam_init_params sam_params;
/* Generate a unique device name when the user supplied none. */
728 if (init_params->name[0] == '\0') {
729 ret = rte_cryptodev_pmd_create_dev_name(
730 init_params->name, name);
733 MRVL_CRYPTO_LOG_ERR("failed to create unique name");
738 dev = rte_cryptodev_vdev_pmd_init(init_params->name,
739 sizeof(struct mrvl_crypto_private),
740 init_params->socket_id, vdev);
742 MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
746 dev->driver_id = cryptodev_driver_id;
747 dev->dev_ops = rte_mrvl_crypto_pmd_ops;
749 /* Register rx/tx burst functions for data path. */
750 dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
751 dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
753 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
754 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
755 RTE_CRYPTODEV_FF_HW_ACCELERATED;
757 /* Set vector instructions mode supported */
758 internals = dev->data->dev_private;
760 internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
761 internals->max_nb_sessions = init_params->max_nb_sessions;
764 * ret == -EEXIST is correct, it means DMA
765 * has been already initialized.
767 ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
772 MRVL_CRYPTO_LOG_INFO(
773 "DMA memory has been already initialized by a different driver.");
/* Propagate the configured session limit down into the MUSDK SAM layer. */
776 sam_params.max_num_sessions = internals->max_nb_sessions;
778 return sam_init(&sam_params);
782 "driver %s: %s failed", init_params->name, __func__);
/* Error path: undo any partial initialization before returning. */
784 cryptodev_mrvl_crypto_uninit(vdev);
789 * Initialize the crypto device.
791 * @param vdev Pointer to device structure.
792 * @returns 0 in case of success, negative value otherwise.
795 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
797 struct rte_crypto_vdev_init_params init_params = { };
799 const char *input_args;
802 name = rte_vdev_device_name(vdev);
805 input_args = rte_vdev_device_args(vdev);
/* Defaults: one queue pair per HW ring on every SAM instance. */
810 init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
811 init_params.max_nb_sessions =
812 RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
813 init_params.socket_id = rte_socket_id();
/* Device args (max_nb_queue_pairs=, max_nb_sessions=, socket_id=) override defaults. */
815 ret = rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
817 RTE_LOG(ERR, PMD, "Failed to parse input arguments\n");
821 RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
822 init_params.socket_id);
823 if (init_params.name[0] != '\0') {
824 RTE_LOG(INFO, PMD, " User defined name = %s\n",
827 RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
828 init_params.max_nb_queue_pairs);
829 RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
830 init_params.max_nb_sessions);
832 return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
836 * Uninitialize the crypto device
838 * @param vdev Pointer to device structure.
839 * @returns 0 in case of success, negative value otherwise.
842 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
844 const char *name = rte_vdev_device_name(vdev);
/* Informational log only; actual teardown lines are not visible in this extract. */
850 "Closing Marvell crypto device %s on numa socket %u\n",
851 name, rte_socket_id());
859 * Basic driver handlers for use in the constructor.
861 static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
862 .probe = cryptodev_mrvl_crypto_init,
863 .remove = cryptodev_mrvl_crypto_uninit
/* Crypto-driver wrapper used to obtain cryptodev_driver_id at registration. */
866 static struct cryptodev_driver mrvl_crypto_drv;
868 /* Register the driver in constructor. */
869 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
/* Advertise the device arguments accepted by cryptodev_mrvl_crypto_init(). */
870 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
871 "max_nb_queue_pairs=<int> "
872 "max_nb_sessions=<int> "
/* Assigns cryptodev_driver_id used when storing/fetching session private data. */
874 RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
875 cryptodev_driver_id);