#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
+#include <rte_security_driver.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
+#include <rte_kvargs.h>
+#include <rte_mvep_common.h>
-#include "rte_mrvl_pmd_private.h"
-
-#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+#include "mrvl_pmd_private.h"
#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
* Map of supported cipher algorithms.
*/
static const
-struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
+struct cipher_params_mapping cipher_map[] = {
+ [RTE_CRYPTO_CIPHER_NULL] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_NONE },
[RTE_CRYPTO_CIPHER_3DES_CBC] = {
.supported = ALGO_SUPPORTED,
.cipher_alg = SAM_CIPHER_3DES,
.cipher_alg = SAM_CIPHER_AES,
.cipher_mode = SAM_CIPHER_CTR,
.max_key_len = BITS2BYTES(256) },
+ [RTE_CRYPTO_CIPHER_AES_ECB] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_ECB,
+ .max_key_len = BITS2BYTES(256) },
};
/**
* Map of supported auth algorithms.
*/
static const
-struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
+struct auth_params_mapping auth_map[] = {
+ [RTE_CRYPTO_AUTH_NULL] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_NONE },
[RTE_CRYPTO_AUTH_MD5_HMAC] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HMAC_MD5 },
[RTE_CRYPTO_AUTH_SHA1] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HASH_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
[RTE_CRYPTO_AUTH_SHA224] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HASH_SHA2_224 },
* Map of supported aead algorithms.
*/
static const
-struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
+struct cipher_params_mapping aead_map[] = {
[RTE_CRYPTO_AEAD_AES_GCM] = {
.supported = ALGO_SUPPORTED,
.cipher_alg = SAM_CIPHER_AES,
mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
const struct rte_crypto_sym_xform *cipher_xform)
{
+ uint8_t *cipher_key;
+
/* Make sure we've got proper struct */
if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
(cipher_map[cipher_xform->cipher.algo].supported
!= ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ MRVL_LOG(ERR, "Cipher algorithm not supported!");
return -EINVAL;
}
/* Get max key length. */
if (cipher_xform->cipher.key.length >
cipher_map[cipher_xform->cipher.algo].max_key_len) {
- MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ MRVL_LOG(ERR, "Wrong key length!");
return -EINVAL;
}
+ cipher_key = malloc(cipher_xform->cipher.key.length);
+ if (cipher_key == NULL) {
+ MRVL_LOG(ERR, "Insufficient memory!");
+ return -ENOMEM;
+ }
+
+ memcpy(cipher_key, cipher_xform->cipher.key.data,
+ cipher_xform->cipher.key.length);
+
sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
- sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
+ sess->sam_sess_params.cipher_key = cipher_key;
return 0;
}
mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
const struct rte_crypto_sym_xform *auth_xform)
{
+ uint8_t *auth_key = NULL;
+
/* Make sure we've got proper struct */
if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
/* See if map data is present and valid */
if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
(auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ MRVL_LOG(ERR, "Auth algorithm not supported!");
return -EINVAL;
}
auth_map[auth_xform->auth.algo].auth_alg;
sess->sam_sess_params.u.basic.auth_icv_len =
auth_xform->auth.digest_length;
+
+	if (auth_xform->auth.key.length > 0) {
+		auth_key = malloc(auth_xform->auth.key.length);
+		if (auth_key == NULL) {
+			MRVL_LOG(ERR, "Not enough memory!");
+			/* Fix: report -ENOMEM like the cipher/aead paths
+			 * (was -EINVAL, masking the allocation failure).
+			 */
+			return -ENOMEM;
+		}
+
+		memcpy(auth_key, auth_xform->auth.key.data,
+			auth_xform->auth.key.length);
+	}
+
 /* auth_key must be NULL if auth algorithm does not use HMAC */
-	sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
-		auth_xform->auth.key.data : NULL;
+	sess->sam_sess_params.auth_key = auth_key;
sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
return 0;
mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
const struct rte_crypto_sym_xform *aead_xform)
{
+ uint8_t *aead_key;
+
/* Make sure we've got proper struct */
if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
(aead_map[aead_xform->aead.algo].supported
!= ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ MRVL_LOG(ERR, "AEAD algorithm not supported!");
return -EINVAL;
}
sess->sam_sess_params.cipher_mode =
aead_map[aead_xform->aead.algo].cipher_mode;
+ if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM) {
+ /* IV must include nonce for all counter modes */
+ sess->cipher_iv_offset = aead_xform->cipher.iv.offset;
+
+ /* Set order of authentication then encryption to 0 in GCM */
+ sess->sam_sess_params.u.basic.auth_then_encrypt = 0;
+ }
+
/* Assume IV will be passed together with data. */
sess->sam_sess_params.cipher_iv = NULL;
/* Get max key length. */
if (aead_xform->aead.key.length >
aead_map[aead_xform->aead.algo].max_key_len) {
- MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ MRVL_LOG(ERR, "Wrong key length!");
return -EINVAL;
}
- sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
+ aead_key = malloc(aead_xform->aead.key.length);
+ if (aead_key == NULL) {
+ MRVL_LOG(ERR, "Insufficient memory!");
+ return -ENOMEM;
+ }
+
+ memcpy(aead_key, aead_xform->aead.key.data,
+ aead_xform->aead.key.length);
+
+ sess->sam_sess_params.cipher_key = aead_key;
sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
* Parse crypto transform chain and setup session parameters.
*
* @param dev Pointer to crypto device
- * @param sess Poiner to crypto session
+ * @param sess Pointer to crypto session
* @param xform Pointer to configuration structure chain for crypto operations.
* @returns 0 in case of success, negative value otherwise.
*/
if ((cipher_xform != NULL) &&
(mrvl_crypto_set_cipher_session_parameters(
sess, cipher_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported cipher parameters!");
return -EINVAL;
}
if ((auth_xform != NULL) &&
(mrvl_crypto_set_auth_session_parameters(
sess, auth_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported auth parameters!");
return -EINVAL;
}
if ((aead_xform != NULL) &&
(mrvl_crypto_set_aead_session_parameters(
sess, aead_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported aead parameters!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert an anti-replay window size to a MUSDK SAM mask constant.
+ *
+ * NOTE: "REPLY" (not "REPLAY") is the spelling used by the MUSDK
+ * SAM_ANTI_REPLY_MASK_* API constants.
+ *
+ * @param replay_win_sz Requested window size; only 0, 32, 64 and 128
+ *        are supported.
+ * @returns SAM_ANTI_REPLY_MASK_* value on success, -EINVAL otherwise.
+ */
+static int
+replay_wsz_to_mask(uint32_t replay_win_sz)
+{
+	int mask = 0;
+
+	switch (replay_win_sz) {
+	case 0:
+		mask = SAM_ANTI_REPLY_MASK_NONE;
+		break;
+	case 32:
+		mask = SAM_ANTI_REPLY_MASK_32B;
+		break;
+	case 64:
+		mask = SAM_ANTI_REPLY_MASK_64B;
+		break;
+	case 128:
+		mask = SAM_ANTI_REPLY_MASK_128B;
+		break;
+	default:
+		MRVL_LOG(ERR, "Invalid antireplay window size");
+		return -EINVAL;
+	}
+	return mask;
+}
+
+/**
+ * Parse IPSEC session parameters.
+ *
+ * @param sess Pointer to security session
+ * @param ipsec_xform Pointer to configuration structure IPSEC operations.
+ * @param crypto_xform Pointer to chain for crypto operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_ipsec_set_session_parameters(struct mrvl_crypto_session *sess,
+		struct rte_security_ipsec_xform *ipsec_xform,
+		struct rte_crypto_sym_xform *crypto_xform)
+{
+	int seq_mask_size;
+
+	/* Filter out spurious/broken requests */
+	if (ipsec_xform == NULL || crypto_xform == NULL)
+		return -EINVAL;
+
+	/* Crypto parameters handling */
+	if (mrvl_crypto_set_session_parameters(sess, crypto_xform))
+		return -EINVAL;
+
+	/* Translate the anti-replay window size to a SAM mask constant. */
+	seq_mask_size = replay_wsz_to_mask(ipsec_xform->replay_win_sz);
+	if (seq_mask_size < 0)
+		return -EINVAL;
+
+	/* IPSEC protocol parameters handling */
+	sess->sam_sess_params.proto = SAM_PROTO_IPSEC;
+	sess->sam_sess_params.u.ipsec.is_esp =
+		(ipsec_xform->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
+		1 : 0;
+	/* NOTE(review): only IPv4 tunnel parameters are filled in below and
+	 * is_ip6 is hard-coded to 0, so IPv6 SAs are not supported here.
+	 */
+	sess->sam_sess_params.u.ipsec.is_ip6 = 0;
+	sess->sam_sess_params.u.ipsec.is_tunnel =
+		(ipsec_xform->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) ?
+		1 : 0;
+	sess->sam_sess_params.u.ipsec.is_esn = ipsec_xform->options.esn;
+	sess->sam_sess_params.u.ipsec.seq_mask_size = seq_mask_size;
+
+	/* NOTE(review): sip/dip store pointers into the caller-owned
+	 * ipsec_xform, not copies — the xform memory must remain valid
+	 * until the SAM session is created; confirm against the
+	 * rte_security session-create path.
+	 */
+	sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.sip =
+		(uint8_t *)(&ipsec_xform->tunnel.ipv4.src_ip.s_addr);
+	sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.dip =
+		(uint8_t *)&(ipsec_xform->tunnel.ipv4.dst_ip.s_addr);
+
+	sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.dscp =
+		ipsec_xform->tunnel.ipv4.dscp;
+	sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.ttl =
+		ipsec_xform->tunnel.ipv4.ttl;
+	sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.df =
+		ipsec_xform->tunnel.ipv4.df;
+	sess->sam_sess_params.u.ipsec.tunnel.copy_dscp =
+		ipsec_xform->options.copy_dscp;
+	sess->sam_sess_params.u.ipsec.tunnel.copy_flabel =
+		ipsec_xform->options.copy_flabel;
+	sess->sam_sess_params.u.ipsec.tunnel.copy_df =
+		ipsec_xform->options.copy_df;
+
+	/* NAT traversal is not configured; sequence number starts at 0. */
+	sess->sam_sess_params.u.ipsec.is_natt = 0;
+	sess->sam_sess_params.u.ipsec.spi = ipsec_xform->spi;
+	sess->sam_sess_params.u.ipsec.seq = 0;
+
	return 0;
}
* @param op Pointer to DPDK crypto operation struct [In].
*/
static inline int
-mrvl_request_prepare(struct sam_cio_op_params *request,
+mrvl_request_prepare_crp(struct sam_cio_op_params *request,
struct sam_buf_info *src_bd,
struct sam_buf_info *dst_bd,
struct rte_crypto_op *op)
{
struct mrvl_crypto_session *sess;
- struct rte_mbuf *dst_mbuf;
+ struct rte_mbuf *src_mbuf, *dst_mbuf;
+ uint16_t segments_nb;
uint8_t *digest;
+ int i;
if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
- "oriented requests, op (%p) is sessionless.",
+ MRVL_LOG(ERR, "MRVL CRYPTO PMD only supports session "
+ "oriented requests, op (%p) is sessionless!",
op);
return -EINVAL;
}
sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
- op->sym->session, cryptodev_driver_id);
+ op->sym->session,
+ cryptodev_driver_id);
if (unlikely(sess == NULL)) {
- MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ MRVL_LOG(ERR, "Session was not created for this device!");
return -EINVAL;
}
- /*
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+ src_mbuf = op->sym->m_src;
+ segments_nb = src_mbuf->nb_segs;
+ /* The following conditions must be met:
+ * - Destination buffer is required when segmented source buffer
+ * - Segmented destination buffer is not supported
+ */
+ if ((segments_nb > 1) && (!op->sym->m_dst)) {
+ MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
+ return -1;
+ }
+ /* For non SG case:
* If application delivered us null dst buffer, it means it expects
* us to deliver the result in src buffer.
*/
dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
- request->sa = sess->sam_sess;
- request->cookie = op;
-
- /* Single buffers only, sorry. */
- request->num_bufs = 1;
- request->src = src_bd;
- src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
- src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
- src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
-
- /* Empty source. */
- if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
- /* EIP does not support 0 length buffers. */
- MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
+ MRVL_LOG(ERR, "Segmented destination buffer not supported!");
return -1;
}
+ request->num_bufs = segments_nb;
+ for (i = 0; i < segments_nb; i++) {
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(src_mbuf) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_LOG(ERR, "Buffer length == 0 not supported!");
+ return -1;
+ }
+ src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
+ src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
+ src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);
+
+ src_mbuf = src_mbuf->next;
+ }
+ request->src = src_bd;
+
/* Empty destination. */
if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
/* Make dst buffer fit at least source data. */
if (rte_pktmbuf_append(dst_mbuf,
rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
- MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+ MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
return -1;
}
}
request->cipher_len = op->sym->aead.data.length;
request->cipher_offset = op->sym->aead.data.offset;
request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- sess->cipher_iv_offset);
+ sess->cipher_iv_offset);
request->auth_aad = op->sym->aead.aad.data;
request->auth_offset = request->cipher_offset;
/*
* EIP supports only scenarios where ICV(digest buffer) is placed at
- * auth_icv_offset. Any other placement means risking errors.
+ * auth_icv_offset.
*/
if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
/*
*/
if (rte_pktmbuf_mtod_offset(
dst_mbuf, uint8_t *,
- request->auth_icv_offset) == digest) {
+ request->auth_icv_offset) == digest)
return 0;
- }
} else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
/*
* EIP will look for digest at auth_icv_offset
- * offset in SRC buffer.
+ * offset in SRC buffer. It must be placed in the last
+ * segment and the offset must be set to reach digest
+ * in the last segment
*/
- if (rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- request->auth_icv_offset) == digest) {
+ struct rte_mbuf *last_seg = op->sym->m_src;
+ uint32_t d_offset = request->auth_icv_offset;
+ u32 d_size = sess->sam_sess_params.u.basic.auth_icv_len;
+ unsigned char *d_ptr;
+
+ /* Find the last segment and the offset for the last segment */
+ while ((last_seg->next != NULL) &&
+ (d_offset >= last_seg->data_len)) {
+ d_offset -= last_seg->data_len;
+ last_seg = last_seg->next;
+ }
+
+ if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
+ d_offset) == digest)
+ return 0;
+
+ /* copy digest to last segment */
+ if (last_seg->buf_len >= (d_size + d_offset)) {
+ d_ptr = (unsigned char *)last_seg->buf_addr +
+ d_offset;
+ rte_memcpy(d_ptr, digest, d_size);
return 0;
}
}
return -1;
}
+/**
+ * Prepare a single security protocol (IPSEC lookaside) request.
+ *
+ * This function basically translates a DPDK security request into one
+ * understandable by MUSDK's SAM.
+ *
+ * @param request Pointer to pre-allocated and reset request buffer [Out].
+ * @param src_bd Pointer to pre-allocated source descriptor [Out].
+ * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
+ * @param op Pointer to DPDK crypto operation struct [In].
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_request_prepare_sec(struct sam_cio_ipsec_params *request,
+		struct sam_buf_info *src_bd,
+		struct sam_buf_info *dst_bd,
+		struct rte_crypto_op *op)
+{
+	struct mrvl_crypto_session *sess;
+	struct rte_mbuf *src_mbuf, *dst_mbuf;
+	uint16_t segments_nb;
+	int i;
+
+	/* Only rte_security sessions carry the SAM IPSEC session handle. */
+	if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
+		MRVL_LOG(ERR, "MRVL SECURITY: sess_type is not SECURITY_SESSION");
+		return -EINVAL;
+	}
+
+	sess = (struct mrvl_crypto_session *)get_sec_session_private_data(
+			op->sym->sec_session);
+	if (unlikely(sess == NULL)) {
+		MRVL_LOG(ERR, "Session was not created for this device! %d",
+			 cryptodev_driver_id);
+		return -EINVAL;
+	}
+
+	request->sa = sess->sam_sess;
+	request->cookie = op;
+	src_mbuf = op->sym->m_src;
+	segments_nb = src_mbuf->nb_segs;
+	/* The following conditions must be met:
+	 * - Destination buffer is required when segmented source buffer
+	 * - Segmented destination buffer is not supported
+	 */
+	if ((segments_nb > 1) && (!op->sym->m_dst)) {
+		MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
+		return -1;
+	}
+	/* For non SG case:
+	 * If application delivered us null dst buffer, it means it expects
+	 * us to deliver the result in src buffer.
+	 */
+	dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+	if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
+		MRVL_LOG(ERR, "Segmented destination buffer not supported!");
+		return -1;
+	}
+
+	/* Build one source descriptor per mbuf segment. */
+	request->num_bufs = segments_nb;
+	for (i = 0; i < segments_nb; i++) {
+		/* Empty source. */
+		if (rte_pktmbuf_data_len(src_mbuf) == 0) {
+			/* EIP does not support 0 length buffers. */
+			MRVL_LOG(ERR, "Buffer length == 0 not supported!");
+			return -1;
+		}
+		src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
+		src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
+		src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);
+
+		src_mbuf = src_mbuf->next;
+	}
+	request->src = src_bd;
+
+	/* Empty destination. */
+	if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
+		/* Make dst buffer fit at least source data. */
+		if (rte_pktmbuf_append(dst_mbuf,
+				rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
+			MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
+			return -1;
+		}
+	}
+
+	request->dst = dst_bd;
+	dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
+	dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
+
+	/*
+	 * We can use all available space in dst_mbuf,
+	 * not only what's used currently.
+	 */
+	dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
+
+
+	/* NOTE(review): l3_offset is hard-coded to 0 — assumes the L3
+	 * header starts at the beginning of the buffer; confirm against
+	 * the caller's packet layout.
+	 */
+	request->l3_offset = 0;
+	request->pkt_size = rte_pktmbuf_pkt_len(op->sym->m_src);
+
+	return 0;
+}
+
+
/*
*-----------------------------------------------------------------------------
* PMD Framework handlers
uint16_t nb_ops)
{
uint16_t iter_ops = 0;
- uint16_t to_enq = 0;
+ uint16_t to_enq_crp = 0;
+ uint16_t to_enq_sec = 0;
uint16_t consumed = 0;
int ret;
- struct sam_cio_op_params requests[nb_ops];
+ int iter;
+ struct sam_cio_op_params requests_crp[nb_ops];
+ struct sam_cio_ipsec_params requests_sec[nb_ops];
+ uint16_t indx_map_crp[nb_ops];
+ uint16_t indx_map_sec[nb_ops];
+
/*
- * DPDK uses single fragment buffers, so we can KISS descriptors.
* SAM does not store bd pointers, so on-stack scope will be enough.
*/
- struct sam_buf_info src_bd[nb_ops];
- struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_src_table src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
if (nb_ops == 0)
return 0;
/* Prepare the burst. */
- memset(&requests, 0, sizeof(requests));
+ memset(&requests_crp, 0, sizeof(requests_crp));
+ memset(&requests_sec, 0, sizeof(requests_sec));
+ memset(&src_bd, 0, sizeof(src_bd));
/* Iterate through */
for (; iter_ops < nb_ops; ++iter_ops) {
- if (mrvl_request_prepare(&requests[iter_ops],
- &src_bd[iter_ops],
- &dst_bd[iter_ops],
- ops[iter_ops]) < 0) {
- MRVL_CRYPTO_LOG_ERR(
- "Error while parameters preparation!");
- qp->stats.enqueue_err_count++;
- ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
-
- /*
- * Number of handled ops is increased
- * (even if the result of handling is error).
- */
- ++consumed;
- break;
+ /* store the op id for debug */
+ if (ops[iter_ops]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ src_bd[iter_ops].iter_ops = to_enq_crp;
+ indx_map_crp[to_enq_crp] = iter_ops;
+
+ if (mrvl_request_prepare_crp(&requests_crp[to_enq_crp],
+ src_bd[iter_ops].src_bd,
+ &dst_bd[iter_ops],
+ ops[iter_ops]) < 0) {
+ MRVL_LOG(ERR,
+ "Error while preparing parameters!");
+ qp->stats.enqueue_err_count++;
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ /*
+ * Number of handled ops is increased
+ * (even if the result of handling is error).
+ */
+ ++consumed;
+
+ break;
+ }
+ /* Increase the number of ops to enqueue. */
+ ++to_enq_crp;
+ } else {
+ src_bd[iter_ops].iter_ops = to_enq_sec;
+ indx_map_sec[to_enq_sec] = iter_ops;
+ if (mrvl_request_prepare_sec(&requests_sec[to_enq_sec],
+ src_bd[iter_ops].src_bd,
+ &dst_bd[iter_ops],
+ ops[iter_ops]) < 0) {
+ MRVL_LOG(ERR,
+ "Error while preparing parameters!");
+ qp->stats.enqueue_err_count++;
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ /*
+ * Number of handled ops is increased
+ * (even if the result of handling is error).
+ */
+ ++consumed;
+
+ break;
+ }
+ /* Increase the number of ops to enqueue. */
+ ++to_enq_sec;
}
ops[iter_ops]->status =
RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
- /* Increase the number of ops to enqueue. */
- ++to_enq;
} /* for (; iter_ops < nb_ops;... */
- if (to_enq > 0) {
+ if (to_enq_crp > 0) {
/* Send the burst */
- ret = sam_cio_enq(qp->cio, requests, &to_enq);
- consumed += to_enq;
+ ret = sam_cio_enq(qp->cio, requests_crp, &to_enq_crp);
+ consumed += to_enq_crp;
if (ret < 0) {
/*
* Trust SAM that in this case returned value will be at
* some point correct (now it is returned unmodified).
*/
- qp->stats.enqueue_err_count += to_enq;
- for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
- ops[iter_ops]->status =
+ qp->stats.enqueue_err_count += to_enq_crp;
+ for (iter = 0; iter < to_enq_crp; ++iter)
+ ops[indx_map_crp[iter]]->status =
RTE_CRYPTO_OP_STATUS_ERROR;
}
}
- qp->stats.enqueued_count += to_enq;
+	if (to_enq_sec > 0) {
+		/* Send the burst of IPSEC (security session) requests. */
+		ret = sam_cio_enq_ipsec(qp->cio, requests_sec, &to_enq_sec);
+		consumed += to_enq_sec;
+		if (ret < 0) {
+			/*
+			 * Trust SAM that in this case returned value will be at
+			 * some point correct (now it is returned unmodified).
+			 */
+			qp->stats.enqueue_err_count += to_enq_sec;
+			/* Fix: bound must be to_enq_sec (was to_enq_crp),
+			 * matching the indx_map_sec[] entries filled above.
+			 */
+			for (iter = 0; iter < to_enq_sec; ++iter)
+				ops[indx_map_sec[iter]]->status =
+					RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+	}
+
+ qp->stats.enqueued_count += to_enq_sec + to_enq_crp;
return consumed;
}
struct sam_cio *cio = qp->cio;
struct sam_cio_op_result results[nb_ops];
uint16_t i;
+ struct rte_mbuf *dst;
ret = sam_cio_deq(cio, results, &nb_ops);
if (ret < 0) {
switch (results[i].status) {
case SAM_CIO_OK:
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (ops[i]->sess_type ==
+ RTE_CRYPTO_OP_SECURITY_SESSION) {
+
+ if (ops[i]->sym->m_dst)
+ dst = ops[i]->sym->m_dst;
+ else
+ dst = ops[i]->sym->m_src;
+ dst->pkt_len = results[i].out_len;
+ dst->data_len = results[i].out_len;
+ }
break;
case SAM_CIO_ERR_ICV:
- MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ MRVL_LOG(DEBUG, "CIO returned SAM_CIO_ERR_ICV.");
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
break;
default:
- MRVL_CRYPTO_LOG_DBG(
- "CIO returned Error: %d", results[i].status);
+ MRVL_LOG(DEBUG,
+ "CIO returned Error: %d.", results[i].status);
ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
break;
}
struct rte_cryptodev *dev;
struct mrvl_crypto_private *internals;
struct sam_init_params sam_params;
- int ret;
+ struct rte_security_ctx *security_instance;
+ int ret = -EINVAL;
dev = rte_cryptodev_pmd_create(name, &vdev->device,
&init_params->common);
if (dev == NULL) {
- MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ MRVL_LOG(ERR, "Failed to create cryptodev vdev!");
goto init_error;
}
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
- RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_SECURITY;
- /* Set vector instructions mode supported */
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
- /*
- * ret == -EEXIST is correct, it means DMA
- * has been already initialized.
- */
- ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
- if (ret < 0) {
- if (ret != -EEXIST)
- return ret;
-
- MRVL_CRYPTO_LOG_INFO(
- "DMA memory has been already initialized by a different driver.");
- }
+ ret = rte_mvep_init(MVEP_MOD_T_SAM, NULL);
+ if (ret)
+ goto init_error;
sam_params.max_num_sessions = internals->max_nb_sessions;
- return sam_init(&sam_params);
+	/* Initialize security_ctx only for primary process */
+	security_instance = rte_malloc("rte_security_instances_ops",
+				sizeof(struct rte_security_ctx), 0);
+	if (security_instance == NULL) {
+		/* Fix: go through init_error so the already-created
+		 * cryptodev and the mvep reference are released instead
+		 * of leaked by an early return.
+		 */
+		ret = -ENOMEM;
+		goto init_error;
+	}
+	security_instance->device = (void *)dev;
+	security_instance->ops = rte_mrvl_security_pmd_ops;
+	security_instance->sess_cnt = 0;
+	dev->security_ctx = security_instance;
+
+ /*sam_set_debug_flags(3);*/
+
+ ret = sam_init(&sam_params);
+ if (ret)
+ goto init_error;
+
+ rte_cryptodev_pmd_probing_finish(dev);
+
+ return 0;
init_error:
- MRVL_CRYPTO_LOG_ERR(
- "driver %s: %s failed", init_params->common.name, __func__);
+ MRVL_LOG(ERR,
+ "Driver %s: %s failed!", init_params->common.name, __func__);
cryptodev_mrvl_crypto_uninit(vdev);
- return -EFAULT;
+ return ret;
}
/** Parse integer from integer argument */
*i = atoi(value);
if (*i < 0) {
- MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
+ MRVL_LOG(ERR, "Argument has to be positive!");
return -EINVAL;
}
struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", value,
- RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ MRVL_LOG(ERR, "Invalid name %s, should be less than %u bytes!",
+ value, RTE_CRYPTODEV_NAME_MAX_LEN - 1);
return -EINVAL;
}
ret = rte_kvargs_process(kvlist,
RTE_CRYPTODEV_PMD_NAME_ARG,
&parse_name_arg,
- ¶ms->common);
+ ¶ms->common.name);
if (ret < 0)
goto free_kvlist;
ret = rte_kvargs_process(kvlist,
MRVL_PMD_MAX_NB_SESS_ARG,
&parse_integer_arg,
- params);
+ ¶ms->max_nb_sessions);
if (ret < 0)
goto free_kvlist;
ret = mrvl_pmd_parse_input_args(&init_params, args);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Failed to parse initialisation arguments[%s]\n",
- args);
+ MRVL_LOG(ERR, "Failed to parse initialisation arguments[%s]!",
+ args);
return -EINVAL;
}
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD,
- "Closing Marvell crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ MRVL_LOG(INFO, "Closing Marvell crypto device %s on numa socket %u.",
+ name, rte_socket_id());
sam_deinit();
+ rte_mvep_deinit(MVEP_MOD_T_SAM);
cryptodev = rte_cryptodev_pmd_get_named_dev(name);
if (cryptodev == NULL)
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
cryptodev_driver_id);
+RTE_LOG_REGISTER_DEFAULT(mrvl_logtype_driver, NOTICE);