/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Marvell International Ltd.
 *   Copyright(c) 2017 Semihalf.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of the copyright holder nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <string.h>

#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>

#include "rte_mrvl_pmd_private.h"
45 #define MRVL_MUSDK_DMA_MEMSIZE 41943040
47 static uint8_t cryptodev_driver_id;
50 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
52 * The idea is to have Not Supported value as default (0).
53 * This way we need only to define proper map sizes,
54 * non-initialized entries will be by default not supported.
57 ALGO_NOT_SUPPORTED = 0,
61 /** Map elements for cipher mapping.*/
62 struct cipher_params_mapping {
63 enum algo_supported supported; /**< On/Off switch */
64 enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
65 enum sam_cipher_mode cipher_mode; /**< Cipher mode */
66 unsigned int max_key_len; /**< Maximum key length (in bytes)*/
68 /* We want to squeeze in multiple maps into the cache line. */
71 /** Map elements for auth mapping.*/
72 struct auth_params_mapping {
73 enum algo_supported supported; /**< On/off switch */
74 enum sam_auth_alg auth_alg; /**< Auth algorithm */
76 /* We want to squeeze in multiple maps into the cache line. */
80 * Map of supported cipher algorithms.
83 struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
84 [RTE_CRYPTO_CIPHER_3DES_CBC] = {
85 .supported = ALGO_SUPPORTED,
86 .cipher_alg = SAM_CIPHER_3DES,
87 .cipher_mode = SAM_CIPHER_CBC,
88 .max_key_len = BITS2BYTES(192) },
89 [RTE_CRYPTO_CIPHER_3DES_CTR] = {
90 .supported = ALGO_SUPPORTED,
91 .cipher_alg = SAM_CIPHER_3DES,
92 .cipher_mode = SAM_CIPHER_CTR,
93 .max_key_len = BITS2BYTES(192) },
94 [RTE_CRYPTO_CIPHER_3DES_ECB] = {
95 .supported = ALGO_SUPPORTED,
96 .cipher_alg = SAM_CIPHER_3DES,
97 .cipher_mode = SAM_CIPHER_ECB,
98 .max_key_len = BITS2BYTES(192) },
99 [RTE_CRYPTO_CIPHER_AES_CBC] = {
100 .supported = ALGO_SUPPORTED,
101 .cipher_alg = SAM_CIPHER_AES,
102 .cipher_mode = SAM_CIPHER_CBC,
103 .max_key_len = BITS2BYTES(256) },
104 [RTE_CRYPTO_CIPHER_AES_CTR] = {
105 .supported = ALGO_SUPPORTED,
106 .cipher_alg = SAM_CIPHER_AES,
107 .cipher_mode = SAM_CIPHER_CTR,
108 .max_key_len = BITS2BYTES(256) },
112 * Map of supported auth algorithms.
115 struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
116 [RTE_CRYPTO_AUTH_MD5_HMAC] = {
117 .supported = ALGO_SUPPORTED,
118 .auth_alg = SAM_AUTH_HMAC_MD5 },
119 [RTE_CRYPTO_AUTH_MD5] = {
120 .supported = ALGO_SUPPORTED,
121 .auth_alg = SAM_AUTH_HASH_MD5 },
122 [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
123 .supported = ALGO_SUPPORTED,
124 .auth_alg = SAM_AUTH_HMAC_SHA1 },
125 [RTE_CRYPTO_AUTH_SHA1] = {
126 .supported = ALGO_SUPPORTED,
127 .auth_alg = SAM_AUTH_HASH_SHA1 },
128 [RTE_CRYPTO_AUTH_SHA224] = {
129 .supported = ALGO_SUPPORTED,
130 .auth_alg = SAM_AUTH_HASH_SHA2_224 },
131 [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
132 .supported = ALGO_SUPPORTED,
133 .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
134 [RTE_CRYPTO_AUTH_SHA256] = {
135 .supported = ALGO_SUPPORTED,
136 .auth_alg = SAM_AUTH_HASH_SHA2_256 },
137 [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
138 .supported = ALGO_SUPPORTED,
139 .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
140 [RTE_CRYPTO_AUTH_SHA384] = {
141 .supported = ALGO_SUPPORTED,
142 .auth_alg = SAM_AUTH_HASH_SHA2_384 },
143 [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
144 .supported = ALGO_SUPPORTED,
145 .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
146 [RTE_CRYPTO_AUTH_SHA512] = {
147 .supported = ALGO_SUPPORTED,
148 .auth_alg = SAM_AUTH_HASH_SHA2_512 },
149 [RTE_CRYPTO_AUTH_AES_GMAC] = {
150 .supported = ALGO_SUPPORTED,
151 .auth_alg = SAM_AUTH_AES_GMAC },
155 * Map of supported aead algorithms.
158 struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
159 [RTE_CRYPTO_AEAD_AES_GCM] = {
160 .supported = ALGO_SUPPORTED,
161 .cipher_alg = SAM_CIPHER_AES,
162 .cipher_mode = SAM_CIPHER_GCM,
163 .max_key_len = BITS2BYTES(256) },
167 *-----------------------------------------------------------------------------
168 * Forward declarations.
169 *-----------------------------------------------------------------------------
171 static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
/*
 *-----------------------------------------------------------------------------
 * Session Preparation.
 *-----------------------------------------------------------------------------
 */
180 * Get xform chain order.
182 * @param xform Pointer to configuration structure chain for crypto operations.
183 * @returns Order of crypto operations.
185 static enum mrvl_crypto_chain_order
186 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
188 /* Currently, Marvell supports max 2 operations in chain */
189 if (xform->next != NULL && xform->next->next != NULL)
190 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
192 if (xform->next != NULL) {
193 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
194 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
195 return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
197 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
198 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
199 return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
201 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
202 return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
204 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
205 return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
207 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
208 return MRVL_CRYPTO_CHAIN_COMBINED;
210 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
214 * Set session parameters for cipher part.
216 * @param sess Crypto session pointer.
217 * @param cipher_xform Pointer to configuration structure for cipher operations.
218 * @returns 0 in case of success, negative value otherwise.
221 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
222 const struct rte_crypto_sym_xform *cipher_xform)
224 /* Make sure we've got proper struct */
225 if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
226 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
230 /* See if map data is present and valid */
231 if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
232 (cipher_map[cipher_xform->cipher.algo].supported
233 != ALGO_SUPPORTED)) {
234 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
238 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
240 sess->sam_sess_params.dir =
241 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
242 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
243 sess->sam_sess_params.cipher_alg =
244 cipher_map[cipher_xform->cipher.algo].cipher_alg;
245 sess->sam_sess_params.cipher_mode =
246 cipher_map[cipher_xform->cipher.algo].cipher_mode;
248 /* Assume IV will be passed together with data. */
249 sess->sam_sess_params.cipher_iv = NULL;
251 /* Get max key length. */
252 if (cipher_xform->cipher.key.length >
253 cipher_map[cipher_xform->cipher.algo].max_key_len) {
254 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
258 sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
259 sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
265 * Set session parameters for authentication part.
267 * @param sess Crypto session pointer.
268 * @param auth_xform Pointer to configuration structure for auth operations.
269 * @returns 0 in case of success, negative value otherwise.
272 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
273 const struct rte_crypto_sym_xform *auth_xform)
275 /* Make sure we've got proper struct */
276 if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
277 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
281 /* See if map data is present and valid */
282 if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
283 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
284 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
288 sess->sam_sess_params.dir =
289 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
290 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
291 sess->sam_sess_params.auth_alg =
292 auth_map[auth_xform->auth.algo].auth_alg;
293 sess->sam_sess_params.u.basic.auth_icv_len =
294 auth_xform->auth.digest_length;
295 /* auth_key must be NULL if auth algorithm does not use HMAC */
296 sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
297 auth_xform->auth.key.data : NULL;
298 sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
304 * Set session parameters for aead part.
306 * @param sess Crypto session pointer.
307 * @param aead_xform Pointer to configuration structure for aead operations.
308 * @returns 0 in case of success, negative value otherwise.
311 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
312 const struct rte_crypto_sym_xform *aead_xform)
314 /* Make sure we've got proper struct */
315 if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
316 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
320 /* See if map data is present and valid */
321 if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
322 (aead_map[aead_xform->aead.algo].supported
323 != ALGO_SUPPORTED)) {
324 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
328 sess->sam_sess_params.dir =
329 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
330 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
331 sess->sam_sess_params.cipher_alg =
332 aead_map[aead_xform->aead.algo].cipher_alg;
333 sess->sam_sess_params.cipher_mode =
334 aead_map[aead_xform->aead.algo].cipher_mode;
336 /* Assume IV will be passed together with data. */
337 sess->sam_sess_params.cipher_iv = NULL;
339 /* Get max key length. */
340 if (aead_xform->aead.key.length >
341 aead_map[aead_xform->aead.algo].max_key_len) {
342 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
346 sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
347 sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
349 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
350 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
352 sess->sam_sess_params.u.basic.auth_icv_len =
353 aead_xform->aead.digest_length;
355 sess->sam_sess_params.u.basic.auth_aad_len =
356 aead_xform->aead.aad_length;
362 * Parse crypto transform chain and setup session parameters.
364 * @param dev Pointer to crypto device
365 * @param sess Poiner to crypto session
366 * @param xform Pointer to configuration structure chain for crypto operations.
367 * @returns 0 in case of success, negative value otherwise.
370 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
371 const struct rte_crypto_sym_xform *xform)
373 const struct rte_crypto_sym_xform *cipher_xform = NULL;
374 const struct rte_crypto_sym_xform *auth_xform = NULL;
375 const struct rte_crypto_sym_xform *aead_xform = NULL;
377 /* Filter out spurious/broken requests */
381 sess->chain_order = mrvl_crypto_get_chain_order(xform);
382 switch (sess->chain_order) {
383 case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
384 cipher_xform = xform;
385 auth_xform = xform->next;
387 case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
389 cipher_xform = xform->next;
391 case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
392 cipher_xform = xform;
394 case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
397 case MRVL_CRYPTO_CHAIN_COMBINED:
404 if ((cipher_xform != NULL) &&
405 (mrvl_crypto_set_cipher_session_parameters(
406 sess, cipher_xform) < 0)) {
407 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
411 if ((auth_xform != NULL) &&
412 (mrvl_crypto_set_auth_session_parameters(
413 sess, auth_xform) < 0)) {
414 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
418 if ((aead_xform != NULL) &&
419 (mrvl_crypto_set_aead_session_parameters(
420 sess, aead_xform) < 0)) {
421 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
/*
 *-----------------------------------------------------------------------------
 * Request processing.
 *-----------------------------------------------------------------------------
 */
435 * Prepare a single request.
437 * This function basically translates DPDK crypto request into one
438 * understandable by MUDSK's SAM. If this is a first request in a session,
439 * it starts the session.
441 * @param request Pointer to pre-allocated && reset request buffer [Out].
442 * @param src_bd Pointer to pre-allocated source descriptor [Out].
443 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
444 * @param op Pointer to DPDK crypto operation struct [In].
447 mrvl_request_prepare(struct sam_cio_op_params *request,
448 struct sam_buf_info *src_bd,
449 struct sam_buf_info *dst_bd,
450 struct rte_crypto_op *op)
452 struct mrvl_crypto_session *sess;
453 struct rte_mbuf *dst_mbuf;
456 if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
457 MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
458 "oriented requests, op (%p) is sessionless.",
463 sess = (struct mrvl_crypto_session *)get_session_private_data(
464 op->sym->session, cryptodev_driver_id);
465 if (unlikely(sess == NULL)) {
466 MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
471 * If application delivered us null dst buffer, it means it expects
472 * us to deliver the result in src buffer.
474 dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
476 request->sa = sess->sam_sess;
477 request->cookie = op;
479 /* Single buffers only, sorry. */
480 request->num_bufs = 1;
481 request->src = src_bd;
482 src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
483 src_bd->paddr = rte_pktmbuf_mtophys(op->sym->m_src);
484 src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
487 if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
488 /* EIP does not support 0 length buffers. */
489 MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
493 /* Empty destination. */
494 if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
495 /* Make dst buffer fit at least source data. */
496 if (rte_pktmbuf_append(dst_mbuf,
497 rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
498 MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
503 request->dst = dst_bd;
504 dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
505 dst_bd->paddr = rte_pktmbuf_mtophys(dst_mbuf);
508 * We can use all available space in dst_mbuf,
509 * not only what's used currently.
511 dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
513 if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
514 request->cipher_len = op->sym->aead.data.length;
515 request->cipher_offset = op->sym->aead.data.offset;
516 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
517 sess->cipher_iv_offset);
519 request->auth_aad = op->sym->aead.aad.data;
520 request->auth_offset = request->cipher_offset;
521 request->auth_len = request->cipher_len;
523 request->cipher_len = op->sym->cipher.data.length;
524 request->cipher_offset = op->sym->cipher.data.offset;
525 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
526 sess->cipher_iv_offset);
528 request->auth_offset = op->sym->auth.data.offset;
529 request->auth_len = op->sym->auth.data.length;
532 digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
533 op->sym->aead.digest.data : op->sym->auth.digest.data;
534 if (digest == NULL) {
535 /* No auth - no worry. */
539 request->auth_icv_offset = request->auth_offset + request->auth_len;
542 * EIP supports only scenarios where ICV(digest buffer) is placed at
543 * auth_icv_offset. Any other placement means risking errors.
545 if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
547 * This should be the most common case anyway,
548 * EIP will overwrite DST buffer at auth_icv_offset.
550 if (rte_pktmbuf_mtod_offset(
552 request->auth_icv_offset) == digest) {
555 } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
557 * EIP will look for digest at auth_icv_offset
558 * offset in SRC buffer.
560 if (rte_pktmbuf_mtod_offset(
561 op->sym->m_src, uint8_t *,
562 request->auth_icv_offset) == digest) {
568 * If we landed here it means that digest pointer is
569 * at different than expected place.
/*
 *-----------------------------------------------------------------------------
 * PMD Framework handlers
 *-----------------------------------------------------------------------------
 */
583 * @param queue_pair Pointer to queue pair.
584 * @param ops Pointer to ops requests array.
585 * @param nb_ops Number of elements in ops requests array.
586 * @returns Number of elements consumed from ops.
589 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
592 uint16_t iter_ops = 0;
594 uint16_t consumed = 0;
596 struct sam_cio_op_params requests[nb_ops];
598 * DPDK uses single fragment buffers, so we can KISS descriptors.
599 * SAM does not store bd pointers, so on-stack scope will be enough.
601 struct sam_buf_info src_bd[nb_ops];
602 struct sam_buf_info dst_bd[nb_ops];
603 struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
608 /* Prepare the burst. */
609 memset(&requests, 0, sizeof(requests));
611 /* Iterate through */
612 for (; iter_ops < nb_ops; ++iter_ops) {
613 if (mrvl_request_prepare(&requests[iter_ops],
616 ops[iter_ops]) < 0) {
618 "Error while parameters preparation!");
619 qp->stats.enqueue_err_count++;
620 ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
623 * Number of handled ops is increased
624 * (even if the result of handling is error).
630 ops[iter_ops]->status =
631 RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
633 /* Increase the number of ops to enqueue. */
635 } /* for (; iter_ops < nb_ops;... */
639 ret = sam_cio_enq(qp->cio, requests, &to_enq);
643 * Trust SAM that in this case returned value will be at
644 * some point correct (now it is returned unmodified).
646 qp->stats.enqueue_err_count += to_enq;
647 for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
648 ops[iter_ops]->status =
649 RTE_CRYPTO_OP_STATUS_ERROR;
653 qp->stats.enqueued_count += to_enq;
660 * @param queue_pair Pointer to queue pair.
661 * @param ops Pointer to ops requests array.
662 * @param nb_ops Number of elements in ops requests array.
663 * @returns Number of elements dequeued.
666 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
667 struct rte_crypto_op **ops,
671 struct mrvl_crypto_qp *qp = queue_pair;
672 struct sam_cio *cio = qp->cio;
673 struct sam_cio_op_result results[nb_ops];
676 ret = sam_cio_deq(cio, results, &nb_ops);
678 /* Count all dequeued as error. */
679 qp->stats.dequeue_err_count += nb_ops;
681 /* But act as they were dequeued anyway*/
682 qp->stats.dequeued_count += nb_ops;
687 /* Unpack and check results. */
688 for (i = 0; i < nb_ops; ++i) {
689 ops[i] = results[i].cookie;
691 switch (results[i].status) {
693 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
695 case SAM_CIO_ERR_ICV:
696 MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
697 ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
701 "CIO returned Error: %d", results[i].status);
702 ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
707 qp->stats.dequeued_count += nb_ops;
712 * Create a new crypto device.
714 * @param name Driver name.
715 * @param vdev Pointer to device structure.
716 * @param init_params Pointer to initialization parameters.
717 * @returns 0 in case of success, negative value otherwise.
720 cryptodev_mrvl_crypto_create(const char *name,
721 struct rte_vdev_device *vdev,
722 struct rte_cryptodev_pmd_init_params *init_params)
724 struct rte_cryptodev *dev;
725 struct mrvl_crypto_private *internals;
726 struct sam_init_params sam_params;
729 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
731 MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
735 dev->driver_id = cryptodev_driver_id;
736 dev->dev_ops = rte_mrvl_crypto_pmd_ops;
738 /* Register rx/tx burst functions for data path. */
739 dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
740 dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
742 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
743 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
744 RTE_CRYPTODEV_FF_HW_ACCELERATED;
746 /* Set vector instructions mode supported */
747 internals = dev->data->dev_private;
749 internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
750 internals->max_nb_sessions = init_params->max_nb_sessions;
753 * ret == -EEXIST is correct, it means DMA
754 * has been already initialized.
756 ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
761 MRVL_CRYPTO_LOG_INFO(
762 "DMA memory has been already initialized by a different driver.");
765 sam_params.max_num_sessions = internals->max_nb_sessions;
767 return sam_init(&sam_params);
771 "driver %s: %s failed", init_params->name, __func__);
773 cryptodev_mrvl_crypto_uninit(vdev);
778 * Initialize the crypto device.
780 * @param vdev Pointer to device structure.
781 * @returns 0 in case of success, negative value otherwise.
784 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
786 struct rte_cryptodev_pmd_init_params init_params = { };
787 const char *name, *args;
790 name = rte_vdev_device_name(vdev);
793 args = rte_vdev_device_args(vdev);
795 init_params.private_data_size = sizeof(struct mrvl_crypto_private);
796 init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
797 init_params.max_nb_sessions =
798 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
799 init_params.socket_id = rte_socket_id();
801 ret = rte_cryptodev_pmd_parse_input_args(&init_params, args);
804 "Failed to parse initialisation arguments[%s]\n",
809 return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
813 * Uninitialize the crypto device
815 * @param vdev Pointer to device structure.
816 * @returns 0 in case of success, negative value otherwise.
819 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
821 struct rte_cryptodev *cryptodev;
822 const char *name = rte_vdev_device_name(vdev);
828 "Closing Marvell crypto device %s on numa socket %u\n",
829 name, rte_socket_id());
833 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
834 if (cryptodev == NULL)
837 return rte_cryptodev_pmd_destroy(cryptodev);
841 * Basic driver handlers for use in the constructor.
843 static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
844 .probe = cryptodev_mrvl_crypto_init,
845 .remove = cryptodev_mrvl_crypto_uninit
848 static struct cryptodev_driver mrvl_crypto_drv;
850 /* Register the driver in constructor. */
851 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
852 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
853 "max_nb_queue_pairs=<int> "
854 "max_nb_sessions=<int> "
856 RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
857 cryptodev_driver_id);