1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_kvargs.h>
15 #include <rte_mvep_common.h>
17 #include "mrvl_pmd_private.h"
/* devargs key and default for the per-device session-count limit. */
19 #define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
20 #define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
/* Driver id assigned by the cryptodev framework at registration time. */
22 static uint8_t cryptodev_driver_id;
/* Init parameters: common cryptodev vdev params plus the MVSAM-specific
 * session cap. NOTE(review): the struct's closing brace is on a line not
 * visible in this excerpt.
 */
24 struct mrvl_pmd_init_params {
25 struct rte_cryptodev_pmd_init_params common;
26 uint32_t max_nb_sessions;
/* devargs keys accepted by this PMD (consumed by rte_kvargs_parse below). */
29 const char *mrvl_pmd_valid_params[] = {
30 RTE_CRYPTODEV_PMD_NAME_ARG,
31 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
32 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
33 MRVL_PMD_MAX_NB_SESS_ARG
37 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
39 * The idea is to have Not Supported value as default (0).
40 * This way we need only to define proper map sizes,
41 * non-initialized entries will be by default not supported.
/* enum algo_supported: only the 0 enumerator is visible here; entries of the
 * maps below that are left zero-initialized are therefore rejected.
 */
44 ALGO_NOT_SUPPORTED = 0,
48 /** Map elements for cipher mapping.*/
49 struct cipher_params_mapping {
50 enum algo_supported supported; /**< On/Off switch */
51 enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
52 enum sam_cipher_mode cipher_mode; /**< Cipher mode */
53 unsigned int max_key_len; /**< Maximum key length (in bytes)*/
55 /* We want to squeeze in multiple maps into the cache line. */
58 /** Map elements for auth mapping.*/
59 struct auth_params_mapping {
60 enum algo_supported supported; /**< On/off switch */
61 enum sam_auth_alg auth_alg; /**< Auth algorithm */
63 /* We want to squeeze in multiple maps into the cache line. */
67 * Map of supported cipher algorithms.
/* Indexed by enum rte_crypto_cipher_algorithm; unlisted algorithms stay
 * zero-initialized, i.e. ALGO_NOT_SUPPORTED. Key limits are in bytes.
 */
70 struct cipher_params_mapping cipher_map[] = {
71 [RTE_CRYPTO_CIPHER_NULL] = {
72 .supported = ALGO_SUPPORTED,
73 .cipher_alg = SAM_CIPHER_NONE },
74 [RTE_CRYPTO_CIPHER_3DES_CBC] = {
75 .supported = ALGO_SUPPORTED,
76 .cipher_alg = SAM_CIPHER_3DES,
77 .cipher_mode = SAM_CIPHER_CBC,
78 .max_key_len = BITS2BYTES(192) },
79 [RTE_CRYPTO_CIPHER_3DES_CTR] = {
80 .supported = ALGO_SUPPORTED,
81 .cipher_alg = SAM_CIPHER_3DES,
82 .cipher_mode = SAM_CIPHER_CTR,
83 .max_key_len = BITS2BYTES(192) },
84 [RTE_CRYPTO_CIPHER_3DES_ECB] = {
85 .supported = ALGO_SUPPORTED,
86 .cipher_alg = SAM_CIPHER_3DES,
87 .cipher_mode = SAM_CIPHER_ECB,
88 .max_key_len = BITS2BYTES(192) },
89 [RTE_CRYPTO_CIPHER_AES_CBC] = {
90 .supported = ALGO_SUPPORTED,
91 .cipher_alg = SAM_CIPHER_AES,
92 .cipher_mode = SAM_CIPHER_CBC,
93 .max_key_len = BITS2BYTES(256) },
94 [RTE_CRYPTO_CIPHER_AES_CTR] = {
95 .supported = ALGO_SUPPORTED,
96 .cipher_alg = SAM_CIPHER_AES,
97 .cipher_mode = SAM_CIPHER_CTR,
98 .max_key_len = BITS2BYTES(256) },
99 [RTE_CRYPTO_CIPHER_AES_ECB] = {
100 .supported = ALGO_SUPPORTED,
101 .cipher_alg = SAM_CIPHER_AES,
102 .cipher_mode = SAM_CIPHER_ECB,
103 .max_key_len = BITS2BYTES(256) },
107 * Map of supported auth algorithms.
/* Indexed by enum rte_crypto_auth_algorithm; covers plain-hash and HMAC
 * variants of MD5/SHA1/SHA2 plus AES-GMAC. Unlisted entries default to
 * ALGO_NOT_SUPPORTED.
 */
110 struct auth_params_mapping auth_map[] = {
111 [RTE_CRYPTO_AUTH_NULL] = {
112 .supported = ALGO_SUPPORTED,
113 .auth_alg = SAM_AUTH_NONE },
114 [RTE_CRYPTO_AUTH_MD5_HMAC] = {
115 .supported = ALGO_SUPPORTED,
116 .auth_alg = SAM_AUTH_HMAC_MD5 },
117 [RTE_CRYPTO_AUTH_MD5] = {
118 .supported = ALGO_SUPPORTED,
119 .auth_alg = SAM_AUTH_HASH_MD5 },
120 [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
121 .supported = ALGO_SUPPORTED,
122 .auth_alg = SAM_AUTH_HMAC_SHA1 },
123 [RTE_CRYPTO_AUTH_SHA1] = {
124 .supported = ALGO_SUPPORTED,
125 .auth_alg = SAM_AUTH_HASH_SHA1 },
126 [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
127 .supported = ALGO_SUPPORTED,
128 .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
129 [RTE_CRYPTO_AUTH_SHA224] = {
130 .supported = ALGO_SUPPORTED,
131 .auth_alg = SAM_AUTH_HASH_SHA2_224 },
132 [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
133 .supported = ALGO_SUPPORTED,
134 .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
135 [RTE_CRYPTO_AUTH_SHA256] = {
136 .supported = ALGO_SUPPORTED,
137 .auth_alg = SAM_AUTH_HASH_SHA2_256 },
138 [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
139 .supported = ALGO_SUPPORTED,
140 .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
141 [RTE_CRYPTO_AUTH_SHA384] = {
142 .supported = ALGO_SUPPORTED,
143 .auth_alg = SAM_AUTH_HASH_SHA2_384 },
144 [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
145 .supported = ALGO_SUPPORTED,
146 .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
147 [RTE_CRYPTO_AUTH_SHA512] = {
148 .supported = ALGO_SUPPORTED,
149 .auth_alg = SAM_AUTH_HASH_SHA2_512 },
150 [RTE_CRYPTO_AUTH_AES_GMAC] = {
151 .supported = ALGO_SUPPORTED,
152 .auth_alg = SAM_AUTH_AES_GMAC },
156 * Map of supported aead algorithms.
/* Indexed by enum rte_crypto_aead_algorithm; only AES-GCM is supported.
 * Reuses the cipher mapping struct (auth side is forced to SAM_AUTH_AES_GCM
 * in the AEAD session setup below).
 */
159 struct cipher_params_mapping aead_map[] = {
160 [RTE_CRYPTO_AEAD_AES_GCM] = {
161 .supported = ALGO_SUPPORTED,
162 .cipher_alg = SAM_CIPHER_AES,
163 .cipher_mode = SAM_CIPHER_GCM,
164 .max_key_len = BITS2BYTES(256) },
168 *-----------------------------------------------------------------------------
169 * Forward declarations.
170 *-----------------------------------------------------------------------------
/* Forward declaration: the create/probe error path calls the uninit handler
 * that is defined near the end of this file.
 */
172 static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
175 *-----------------------------------------------------------------------------
176 * Session Preparation.
177 *-----------------------------------------------------------------------------
181 * Get xform chain order.
183 * @param xform Pointer to configuration structure chain for crypto operations.
184 * @returns Order of crypto operations.
186 static enum mrvl_crypto_chain_order
187 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
189 /* Currently, Marvell supports max 2 operations in chain */
190 if (xform->next != NULL && xform->next->next != NULL)
191 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
193 if (xform->next != NULL) {
194 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
195 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
196 return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
198 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
199 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
200 return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
202 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
203 return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
205 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
206 return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
208 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
209 return MRVL_CRYPTO_CHAIN_COMBINED;
211 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
215 * Set session parameters for cipher part.
217 * @param sess Crypto session pointer.
218 * @param cipher_xform Pointer to configuration structure for cipher operations.
219 * @returns 0 in case of success, negative value otherwise.
222 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
223 const struct rte_crypto_sym_xform *cipher_xform)
227 /* Make sure we've got proper struct */
228 if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
229 MRVL_LOG(ERR, "Wrong xform struct provided!");
233 /* See if map data is present and valid */
234 if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
235 (cipher_map[cipher_xform->cipher.algo].supported
236 != ALGO_SUPPORTED)) {
237 MRVL_LOG(ERR, "Cipher algorithm not supported!");
241 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
243 sess->sam_sess_params.dir =
244 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
245 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
246 sess->sam_sess_params.cipher_alg =
247 cipher_map[cipher_xform->cipher.algo].cipher_alg;
248 sess->sam_sess_params.cipher_mode =
249 cipher_map[cipher_xform->cipher.algo].cipher_mode;
251 /* Assume IV will be passed together with data. */
252 sess->sam_sess_params.cipher_iv = NULL;
254 /* Get max key length. */
255 if (cipher_xform->cipher.key.length >
256 cipher_map[cipher_xform->cipher.algo].max_key_len) {
257 MRVL_LOG(ERR, "Wrong key length!");
261 cipher_key = malloc(cipher_xform->cipher.key.length);
262 if (cipher_key == NULL) {
263 MRVL_LOG(ERR, "Insufficient memory!");
267 memcpy(cipher_key, cipher_xform->cipher.key.data,
268 cipher_xform->cipher.key.length);
270 sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
271 sess->sam_sess_params.cipher_key = cipher_key;
277 * Set session parameters for authentication part.
279 * @param sess Crypto session pointer.
280 * @param auth_xform Pointer to configuration structure for auth operations.
281 * @returns 0 in case of success, negative value otherwise.
284 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
285 const struct rte_crypto_sym_xform *auth_xform)
287 uint8_t *auth_key = NULL;
289 /* Make sure we've got proper struct */
290 if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
291 MRVL_LOG(ERR, "Wrong xform struct provided!");
295 /* See if map data is present and valid */
296 if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
297 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
298 MRVL_LOG(ERR, "Auth algorithm not supported!");
302 sess->sam_sess_params.dir =
303 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
304 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
305 sess->sam_sess_params.auth_alg =
306 auth_map[auth_xform->auth.algo].auth_alg;
307 sess->sam_sess_params.u.basic.auth_icv_len =
308 auth_xform->auth.digest_length;
310 if (auth_xform->auth.key.length > 0) {
311 auth_key = malloc(auth_xform->auth.key.length);
312 if (auth_key == NULL) {
313 MRVL_LOG(ERR, "Not enough memory!");
317 memcpy(auth_key, auth_xform->auth.key.data,
318 auth_xform->auth.key.length);
321 /* auth_key must be NULL if auth algorithm does not use HMAC */
322 sess->sam_sess_params.auth_key = auth_key;
323 sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
329 * Set session parameters for aead part.
331 * @param sess Crypto session pointer.
332 * @param aead_xform Pointer to configuration structure for aead operations.
333 * @returns 0 in case of success, negative value otherwise.
336 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
337 const struct rte_crypto_sym_xform *aead_xform)
341 /* Make sure we've got proper struct */
342 if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
343 MRVL_LOG(ERR, "Wrong xform struct provided!");
347 /* See if map data is present and valid */
348 if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
349 (aead_map[aead_xform->aead.algo].supported
350 != ALGO_SUPPORTED)) {
351 MRVL_LOG(ERR, "AEAD algorithm not supported!");
355 sess->sam_sess_params.dir =
356 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
357 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
358 sess->sam_sess_params.cipher_alg =
359 aead_map[aead_xform->aead.algo].cipher_alg;
360 sess->sam_sess_params.cipher_mode =
361 aead_map[aead_xform->aead.algo].cipher_mode;
363 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM) {
364 /* IV must include nonce for all counter modes */
365 sess->cipher_iv_offset = aead_xform->cipher.iv.offset;
367 /* Set order of authentication then encryption to 0 in GCM */
368 sess->sam_sess_params.u.basic.auth_then_encrypt = 0;
371 /* Assume IV will be passed together with data. */
372 sess->sam_sess_params.cipher_iv = NULL;
374 /* Get max key length. */
375 if (aead_xform->aead.key.length >
376 aead_map[aead_xform->aead.algo].max_key_len) {
377 MRVL_LOG(ERR, "Wrong key length!");
381 aead_key = malloc(aead_xform->aead.key.length);
382 if (aead_key == NULL) {
383 MRVL_LOG(ERR, "Insufficient memory!");
387 memcpy(aead_key, aead_xform->aead.key.data,
388 aead_xform->aead.key.length);
390 sess->sam_sess_params.cipher_key = aead_key;
391 sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
393 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
394 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
396 sess->sam_sess_params.u.basic.auth_icv_len =
397 aead_xform->aead.digest_length;
399 sess->sam_sess_params.u.basic.auth_aad_len =
400 aead_xform->aead.aad_length;
406 * Parse crypto transform chain and setup session parameters.
408 * @param dev Pointer to crypto device
409 * @param sess Pointer to crypto session
410 * @param xform Pointer to configuration structure chain for crypto operations.
411 * @returns 0 in case of success, negative value otherwise.
414 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
415 const struct rte_crypto_sym_xform *xform)
417 const struct rte_crypto_sym_xform *cipher_xform = NULL;
418 const struct rte_crypto_sym_xform *auth_xform = NULL;
419 const struct rte_crypto_sym_xform *aead_xform = NULL;
421 /* Filter out spurious/broken requests */
/* NOTE(review): the NULL-xform guard body, the switch's break statements,
 * the AUTH_CIPHER case's auth_xform assignment, the AEAD assignment, the
 * default/error case and the return statements sit on lines not visible in
 * this excerpt — the switch below only routes xforms to the three
 * set_*_session_parameters helpers.
 */
425 sess->chain_order = mrvl_crypto_get_chain_order(xform);
426 switch (sess->chain_order) {
427 case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
428 cipher_xform = xform;
429 auth_xform = xform->next;
431 case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
433 cipher_xform = xform->next;
435 case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
436 cipher_xform = xform;
438 case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
441 case MRVL_CRYPTO_CHAIN_COMBINED:
/* Each helper validates its own xform; a negative return aborts setup. */
448 if ((cipher_xform != NULL) &&
449 (mrvl_crypto_set_cipher_session_parameters(
450 sess, cipher_xform) < 0)) {
451 MRVL_LOG(ERR, "Invalid/unsupported cipher parameters!");
455 if ((auth_xform != NULL) &&
456 (mrvl_crypto_set_auth_session_parameters(
457 sess, auth_xform) < 0)) {
458 MRVL_LOG(ERR, "Invalid/unsupported auth parameters!");
462 if ((aead_xform != NULL) &&
463 (mrvl_crypto_set_aead_session_parameters(
464 sess, aead_xform) < 0)) {
465 MRVL_LOG(ERR, "Invalid/unsupported aead parameters!");
473 *-----------------------------------------------------------------------------
475 *-----------------------------------------------------------------------------
479 * Prepare a single request.
481 * This function basically translates DPDK crypto request into one
482 * understandable by MUSDK's SAM. If this is a first request in a session,
483 * it starts the session.
485 * @param request Pointer to pre-allocated && reset request buffer [Out].
486 * @param src_bd Pointer to pre-allocated source descriptor [Out].
487 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
488 * @param op Pointer to DPDK crypto operation struct [In].
/* NOTE(review): this excerpt is missing several lines (return statements,
 * closing braces, some declarations such as the loop index and digest
 * pointer); comments below describe only what the visible code shows.
 */
491 mrvl_request_prepare(struct sam_cio_op_params *request,
492 struct sam_buf_info *src_bd,
493 struct sam_buf_info *dst_bd,
494 struct rte_crypto_op *op)
496 struct mrvl_crypto_session *sess;
497 struct rte_mbuf *src_mbuf, *dst_mbuf;
498 uint16_t segments_nb;
/* Only session-based operations are supported. */
502 if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
503 MRVL_LOG(ERR, "MRVL CRYPTO PMD only supports session "
504 "oriented requests, op (%p) is sessionless!",
509 sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
510 op->sym->session, cryptodev_driver_id);
511 if (unlikely(sess == NULL)) {
512 MRVL_LOG(ERR, "Session was not created for this device!");
516 request->sa = sess->sam_sess;
517 request->cookie = op;
519 src_mbuf = op->sym->m_src;
520 segments_nb = src_mbuf->nb_segs;
521 /* The following conditions must be met:
522 * - Destination buffer is required when segmented source buffer
523 * - Segmented destination buffer is not supported
525 if ((segments_nb > 1) && (!op->sym->m_dst)) {
526 MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
530 * If application delivered us null dst buffer, it means it expects
531 * us to deliver the result in src buffer.
533 dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
535 if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
536 MRVL_LOG(ERR, "Segmented destination buffer not supported!");
/* Build one SAM buffer descriptor per source mbuf segment. */
540 request->num_bufs = segments_nb;
541 for (i = 0; i < segments_nb; i++) {
543 if (rte_pktmbuf_data_len(src_mbuf) == 0) {
544 /* EIP does not support 0 length buffers. */
545 MRVL_LOG(ERR, "Buffer length == 0 not supported!");
548 src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
549 src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
550 src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);
552 src_mbuf = src_mbuf->next;
554 request->src = src_bd;
556 /* Empty destination. */
557 if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
558 /* Make dst buffer fit at least source data. */
559 if (rte_pktmbuf_append(dst_mbuf,
560 rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
561 MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
566 request->dst = dst_bd;
567 dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
568 dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
571 * We can use all available space in dst_mbuf,
572 * not only what's used currently.
574 dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
/* AEAD sessions drive auth range from the cipher range; otherwise the
 * cipher and auth ranges come from their respective op fields.
 */
576 if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
577 request->cipher_len = op->sym->aead.data.length;
578 request->cipher_offset = op->sym->aead.data.offset;
579 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
580 sess->cipher_iv_offset);
582 request->auth_aad = op->sym->aead.aad.data;
583 request->auth_offset = request->cipher_offset;
584 request->auth_len = request->cipher_len;
586 request->cipher_len = op->sym->cipher.data.length;
587 request->cipher_offset = op->sym->cipher.data.offset;
588 request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
589 sess->cipher_iv_offset);
591 request->auth_offset = op->sym->auth.data.offset;
592 request->auth_len = op->sym->auth.data.length;
595 digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
596 op->sym->aead.digest.data : op->sym->auth.digest.data;
597 if (digest == NULL) {
598 /* No auth - no worry. */
/* ICV is expected directly after the authenticated region. */
602 request->auth_icv_offset = request->auth_offset + request->auth_len;
605 * EIP supports only scenarios where ICV(digest buffer) is placed at
608 if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
610 * This should be the most common case anyway,
611 * EIP will overwrite DST buffer at auth_icv_offset.
613 if (rte_pktmbuf_mtod_offset(
615 request->auth_icv_offset) == digest)
617 } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
619 * EIP will look for digest at auth_icv_offset
620 * offset in SRC buffer. It must be placed in the last
621 * segment and the offset must be set to reach digest
622 * in the last segment
624 struct rte_mbuf *last_seg = op->sym->m_src;
625 uint32_t d_offset = request->auth_icv_offset;
626 u32 d_size = sess->sam_sess_params.u.basic.auth_icv_len;
627 unsigned char *d_ptr;
629 /* Find the last segment and the offset for the last segment */
630 while ((last_seg->next != NULL) &&
631 (d_offset >= last_seg->data_len)) {
632 d_offset -= last_seg->data_len;
633 last_seg = last_seg->next;
636 if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
640 /* copy digest to last segment */
641 if (last_seg->buf_len >= (d_size + d_offset)) {
642 d_ptr = (unsigned char *)last_seg->buf_addr +
644 rte_memcpy(d_ptr, digest, d_size);
650 * If we landed here it means that digest pointer is
651 * at different than expected place.
657 *-----------------------------------------------------------------------------
658 * PMD Framework handlers
659 *-----------------------------------------------------------------------------
665 * @param queue_pair Pointer to queue pair.
666 * @param ops Pointer to ops requests array.
667 * @param nb_ops Number of elements in ops requests array.
668 * @returns Number of elements consumed from ops.
/* Translates up to nb_ops DPDK crypto ops into SAM requests and submits
 * them in one sam_cio_enq() call. NOTE(review): several lines (returns,
 * braces, to_enq/ret declarations, the early nb_ops==0 check) are not
 * visible in this excerpt.
 */
671 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
674 uint16_t iter_ops = 0;
676 uint16_t consumed = 0;
678 struct sam_cio_op_params requests[nb_ops];
680 * SAM does not store bd pointers, so on-stack scope will be enough.
682 struct mrvl_crypto_src_table src_bd[nb_ops];
683 struct sam_buf_info dst_bd[nb_ops];
684 struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
689 /* Prepare the burst. */
690 memset(&requests, 0, sizeof(requests));
691 memset(&src_bd, 0, sizeof(src_bd));
693 /* Iterate through */
694 for (; iter_ops < nb_ops; ++iter_ops) {
695 /* store the op id for debug */
696 src_bd[iter_ops].iter_ops = iter_ops;
697 if (mrvl_request_prepare(&requests[iter_ops],
698 src_bd[iter_ops].src_bd,
700 ops[iter_ops]) < 0) {
701 MRVL_LOG(ERR, "Error while preparing parameters!");
702 qp->stats.enqueue_err_count++;
703 ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
706 * Number of handled ops is increased
707 * (even if the result of handling is error).
713 ops[iter_ops]->status =
714 RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
716 /* Increase the number of ops to enqueue. */
718 } /* for (; iter_ops < nb_ops;... */
/* Submit the whole prepared batch to the SAM crypto I/O instance. */
722 ret = sam_cio_enq(qp->cio, requests, &to_enq);
726 * Trust SAM that in this case returned value will be at
727 * some point correct (now it is returned unmodified).
729 qp->stats.enqueue_err_count += to_enq;
730 for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
731 ops[iter_ops]->status =
732 RTE_CRYPTO_OP_STATUS_ERROR;
736 qp->stats.enqueued_count += to_enq;
743 * @param queue_pair Pointer to queue pair.
744 * @param ops Pointer to ops requests array.
745 * @param nb_ops Number of elements in ops requests array.
746 * @returns Number of elements dequeued.
/* Drains completed results from the SAM CIO and maps SAM statuses onto
 * rte_crypto_op statuses. NOTE(review): the error-path return, the OK case
 * label, the default case's MRVL_LOG(ERR, ...) opening line and the final
 * return are on lines not visible in this excerpt.
 */
749 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
750 struct rte_crypto_op **ops,
754 struct mrvl_crypto_qp *qp = queue_pair;
755 struct sam_cio *cio = qp->cio;
756 struct sam_cio_op_result results[nb_ops];
/* nb_ops is updated in place with the number actually dequeued. */
759 ret = sam_cio_deq(cio, results, &nb_ops);
761 /* Count all dequeued as error. */
762 qp->stats.dequeue_err_count += nb_ops;
764 /* But act as they were dequeued anyway*/
765 qp->stats.dequeued_count += nb_ops;
770 /* Unpack and check results. */
771 for (i = 0; i < nb_ops; ++i) {
772 ops[i] = results[i].cookie;
774 switch (results[i].status) {
776 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
778 case SAM_CIO_ERR_ICV:
779 MRVL_LOG(DEBUG, "CIO returned SAM_CIO_ERR_ICV.");
780 ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
784 "CIO returned Error: %d.", results[i].status);
785 ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
790 qp->stats.dequeued_count += nb_ops;
795 * Create a new crypto device.
797 * @param name Driver name.
798 * @param vdev Pointer to device structure.
799 * @param init_params Pointer to initialization parameters.
800 * @returns 0 in case of success, negative value otherwise.
/* NOTE(review): error-path goto labels, return statements and parts of the
 * sam_params setup are on lines not visible in this excerpt.
 */
803 cryptodev_mrvl_crypto_create(const char *name,
804 struct rte_vdev_device *vdev,
805 struct mrvl_pmd_init_params *init_params)
807 struct rte_cryptodev *dev;
808 struct mrvl_crypto_private *internals;
809 struct sam_init_params sam_params;
/* Allocate the generic cryptodev shell with the common vdev parameters. */
812 dev = rte_cryptodev_pmd_create(name, &vdev->device,
813 &init_params->common);
815 MRVL_LOG(ERR, "Failed to create cryptodev vdev!");
819 dev->driver_id = cryptodev_driver_id;
820 dev->dev_ops = rte_mrvl_crypto_pmd_ops;
822 /* Register rx/tx burst functions for data path. */
823 dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
824 dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
826 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
827 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
828 RTE_CRYPTODEV_FF_HW_ACCELERATED |
829 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
830 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
832 internals = dev->data->dev_private;
834 internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
835 internals->max_nb_sessions = init_params->max_nb_sessions;
/* Bring up the Marvell embedded-processor SAM module, then the SAM lib. */
837 ret = rte_mvep_init(MVEP_MOD_T_SAM, NULL);
841 sam_params.max_num_sessions = internals->max_nb_sessions;
843 /* sam_set_debug_flags(3); */
845 ret = sam_init(&sam_params);
/* Error path: log and tear down whatever was brought up. */
853 "Driver %s: %s failed!", init_params->common.name, __func__);
855 cryptodev_mrvl_crypto_uninit(vdev);
859 /** Parse integer from integer argument */
/* rte_kvargs_process callback: stores the parsed value through extra_args.
 * NOTE(review): the conversion line (presumably atoi/strtol) and the return
 * statements are on lines not visible in this excerpt.
 */
861 parse_integer_arg(const char *key __rte_unused,
862 const char *value, void *extra_args)
864 int *i = (int *) extra_args;
868 MRVL_LOG(ERR, "Argument has to be positive!");
/* rte_kvargs_process callback: copies the device name into the init params.
 * The length guard (strlen <= RTE_CRYPTODEV_NAME_MAX_LEN - 2) guarantees the
 * strncpy below always NUL-terminates. NOTE(review): return statements are
 * on lines not visible in this excerpt.
 */
877 parse_name_arg(const char *key __rte_unused,
878 const char *value, void *extra_args)
880 struct rte_cryptodev_pmd_init_params *params = extra_args;
882 if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
883 MRVL_LOG(ERR, "Invalid name %s, should be less than %u bytes!",
884 value, RTE_CRYPTODEV_NAME_MAX_LEN - 1);
888 strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
894 mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
895 const char *input_args)
897 struct rte_kvargs *kvlist = NULL;
904 kvlist = rte_kvargs_parse(input_args,
905 mrvl_pmd_valid_params);
909 /* Common VDEV parameters */
910 ret = rte_kvargs_process(kvlist,
911 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
913 ¶ms->common.max_nb_queue_pairs);
917 ret = rte_kvargs_process(kvlist,
918 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
920 ¶ms->common.socket_id);
924 ret = rte_kvargs_process(kvlist,
925 RTE_CRYPTODEV_PMD_NAME_ARG,
927 ¶ms->common.name);
931 ret = rte_kvargs_process(kvlist,
932 MRVL_PMD_MAX_NB_SESS_ARG,
934 ¶ms->max_nb_sessions);
941 rte_kvargs_free(kvlist);
946 * Initialize the crypto device.
948 * @param vdev Pointer to device structure.
949 * @returns 0 in case of success, negative value otherwise.
/* Probe handler: builds defaults (queue pairs sized from the SAM hardware,
 * default session cap), overrides them from devargs, then creates the
 * device. NOTE(review): some initializer lines, NULL checks on name/args and
 * the error return are on lines not visible in this excerpt.
 */
952 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
954 struct mrvl_pmd_init_params init_params = {
958 sizeof(struct mrvl_crypto_private),
959 .max_nb_queue_pairs =
960 sam_get_num_inst() * sam_get_num_cios(0),
961 .socket_id = rte_socket_id()
963 .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
966 const char *name, *args;
969 name = rte_vdev_device_name(vdev);
972 args = rte_vdev_device_args(vdev);
/* Devargs override the defaults set above. */
974 ret = mrvl_pmd_parse_input_args(&init_params, args);
976 MRVL_LOG(ERR, "Failed to parse initialisation arguments[%s]!",
981 return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
985 * Uninitialize the crypto device
987 * @param vdev Pointer to device structure.
988 * @returns 0 in case of success, negative value otherwise.
/* Remove handler: tears down the SAM module and destroys the cryptodev.
 * NOTE(review): the sam_deinit() call and NULL-name guard appear to sit on
 * lines not visible in this excerpt.
 */
991 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
993 struct rte_cryptodev *cryptodev;
994 const char *name = rte_vdev_device_name(vdev);
999 MRVL_LOG(INFO, "Closing Marvell crypto device %s on numa socket %u.",
1000 name, rte_socket_id());
1003 rte_mvep_deinit(MVEP_MOD_T_SAM);
1005 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
1006 if (cryptodev == NULL)
1009 return rte_cryptodev_pmd_destroy(cryptodev);
1013 * Basic driver handlers for use in the constructor.
/* vdev driver descriptor wiring probe/remove to the init/uninit handlers. */
1015 static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
1016 .probe = cryptodev_mrvl_crypto_init,
1017 .remove = cryptodev_mrvl_crypto_uninit
1020 static struct cryptodev_driver mrvl_crypto_drv;
1022 /* Register the driver in constructor. */
1023 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
/* Advertised devargs; NOTE(review): the socket_id=<int> entry is likely on a
 * line not visible in this excerpt.
 */
1024 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
1025 "max_nb_queue_pairs=<int> "
1026 "max_nb_sessions=<int> "
/* Obtains cryptodev_driver_id from the framework at constructor time. */
1028 RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
1029 cryptodev_driver_id);
1030 RTE_LOG_REGISTER_DEFAULT(mrvl_logtype_driver, NOTICE);