crypto/bcmfs: add crypto HW module
authorVikas Gupta <vikas.gupta@broadcom.com>
Wed, 7 Oct 2020 17:18:59 +0000 (22:48 +0530)
committerAkhil Goyal <akhil.goyal@nxp.com>
Wed, 14 Oct 2020 20:22:06 +0000 (22:22 +0200)
Add the crypto h/w module that processes crypto ops. A crypto op is
processed by the sym_engine module before the resulting request is
submitted to the HW queues.
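
Illustrative sketch only (not part of the patch), using names introduced
by this patch, of how one op flows through the new module:

    /* translate one symmetric crypto op into an SPU2 request */
    rc = bcmfs_process_sym_crypto_op(op, sess, req);
    if (rc == 0) {
            /* req->msgs now holds the srcs/dsts descriptors that the
             * queue-pair code in bcmfs_sym_pmd.c posts to a HW queue
             */
    }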

Signed-off-by: Vikas Gupta <vikas.gupta@broadcom.com>
Signed-off-by: Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
drivers/crypto/bcmfs/bcmfs_sym.c [new file with mode: 0644]
drivers/crypto/bcmfs/bcmfs_sym_engine.c [new file with mode: 0644]
drivers/crypto/bcmfs/bcmfs_sym_engine.h [new file with mode: 0644]
drivers/crypto/bcmfs/bcmfs_sym_pmd.c
drivers/crypto/bcmfs/bcmfs_sym_req.h
drivers/crypto/bcmfs/meson.build

diff --git a/drivers/crypto/bcmfs/bcmfs_sym.c b/drivers/crypto/bcmfs/bcmfs_sym.c
new file mode 100644 (file)
index 0000000..2d164a1
--- /dev/null
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "bcmfs_sym_defs.h"
+#include "bcmfs_sym_engine.h"
+#include "bcmfs_sym_req.h"
+#include "bcmfs_sym_session.h"
+
+/** Process cipher operation */
+static int
+process_crypto_cipher_op(struct rte_crypto_op *op,
+                        struct rte_mbuf *mbuf_src,
+                        struct rte_mbuf *mbuf_dst,
+                        struct bcmfs_sym_session *sess,
+                        struct bcmfs_sym_request *req)
+{
+       int rc = 0;
+       struct fsattr src, dst, iv, key;
+       struct rte_crypto_sym_op *sym_op = op->sym;
+
+       fsattr_sz(&src) = sym_op->cipher.data.length;
+       fsattr_sz(&dst) = sym_op->cipher.data.length;
+
+       fsattr_va(&src) = rte_pktmbuf_mtod_offset
+                                       (mbuf_src,
+                                        uint8_t *,
+                                        op->sym->cipher.data.offset);
+
+       fsattr_va(&dst) = rte_pktmbuf_mtod_offset
+                                       (mbuf_dst,
+                                        uint8_t *,
+                                        op->sym->cipher.data.offset);
+
+       fsattr_pa(&src) = rte_pktmbuf_iova(mbuf_src);
+       fsattr_pa(&dst) = rte_pktmbuf_iova(mbuf_dst);
+
+       fsattr_va(&iv) = rte_crypto_op_ctod_offset(op,
+                                                  uint8_t *,
+                                                  sess->cipher.iv.offset);
+
+       fsattr_sz(&iv) = sess->cipher.iv.length;
+
+       fsattr_va(&key) = sess->cipher.key.data;
+       fsattr_pa(&key) = 0;
+       fsattr_sz(&key) = sess->cipher.key.length;
+
+       rc = bcmfs_crypto_build_cipher_req(req, sess->cipher.algo,
+                                          sess->cipher.op, &src,
+                                          &dst, &key, &iv);
+       if (rc)
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+       return rc;
+}
+
+/** Process auth operation */
+static int
+process_crypto_auth_op(struct rte_crypto_op *op,
+                      struct rte_mbuf *mbuf_src,
+                      struct bcmfs_sym_session *sess,
+                      struct bcmfs_sym_request *req)
+{
+       int rc = 0;
+       struct fsattr src, dst, mac, key, iv;
+
+       fsattr_sz(&src) = op->sym->auth.data.length;
+       fsattr_va(&src) = rte_pktmbuf_mtod_offset(mbuf_src,
+                                                 uint8_t *,
+                                                 op->sym->auth.data.offset);
+       fsattr_pa(&src) = rte_pktmbuf_iova(mbuf_src);
+
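+       /*
+        * RTE_CRYPTO_AUTH_OP_VERIFY is 0: the digest is an input (mac)
+        * for verify and an output (dst) for generate.
+        */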
+       if (!sess->auth.op) {
+               fsattr_va(&mac) = op->sym->auth.digest.data;
+               fsattr_pa(&mac) = op->sym->auth.digest.phys_addr;
+               fsattr_sz(&mac) = sess->auth.digest_length;
+       } else {
+               fsattr_va(&dst) = op->sym->auth.digest.data;
+               fsattr_pa(&dst) = op->sym->auth.digest.phys_addr;
+               fsattr_sz(&dst) = sess->auth.digest_length;
+       }
+
+       fsattr_va(&key) = sess->auth.key.data;
+       fsattr_pa(&key) = 0;
+       fsattr_sz(&key) = sess->auth.key.length;
+
+       /* AES-GMAC uses the AES-GCM authenticator */
+       if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+               fsattr_va(&iv) = rte_crypto_op_ctod_offset(op,
+                                                          uint8_t *,
+                                                          sess->auth.iv.offset);
+               fsattr_pa(&iv) = 0;
+               fsattr_sz(&iv) = sess->auth.iv.length;
+       } else {
+               fsattr_va(&iv) = NULL;
+               fsattr_sz(&iv) = 0;
+       }
+
+       rc = bcmfs_crypto_build_auth_req(req, sess->auth.algo,
+                                        sess->auth.op,
+                                        &src,
+                                        (sess->auth.op) ? (&dst) : NULL,
+                                        (sess->auth.op) ? NULL : (&mac),
+                                        &key, &iv);
+
+       if (rc)
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+       return rc;
+}
+
+/** Process combined/chained mode operation */
+static int
+process_crypto_combined_op(struct rte_crypto_op *op,
+                          struct rte_mbuf *mbuf_src,
+                          struct rte_mbuf *mbuf_dst,
+                          struct bcmfs_sym_session *sess,
+                          struct bcmfs_sym_request *req)
+{
+       int rc = 0, aad_size = 0;
+       struct fsattr src, dst, iv;
+       struct rte_crypto_sym_op *sym_op = op->sym;
+       struct fsattr cipher_key, aad, mac, auth_key;
+
+       fsattr_sz(&src) = sym_op->cipher.data.length;
+       fsattr_sz(&dst) = sym_op->cipher.data.length;
+
+       fsattr_va(&src) = rte_pktmbuf_mtod_offset
+                                       (mbuf_src,
+                                        uint8_t *,
+                                        sym_op->cipher.data.offset);
+
+       fsattr_va(&dst) = rte_pktmbuf_mtod_offset
+                                       (mbuf_dst,
+                                        uint8_t *,
+                                        sym_op->cipher.data.offset);
+
+       fsattr_pa(&src) = rte_pktmbuf_iova_offset(mbuf_src,
+                                                 sym_op->cipher.data.offset);
+       fsattr_pa(&dst) = rte_pktmbuf_iova_offset(mbuf_dst,
+                                                 sym_op->cipher.data.offset);
+
+       fsattr_va(&iv) = rte_crypto_op_ctod_offset(op,
+                                                  uint8_t *,
+                                                  sess->cipher.iv.offset);
+
+       fsattr_pa(&iv) = 0;
+       fsattr_sz(&iv) = sess->cipher.iv.length;
+
+       fsattr_va(&cipher_key) = sess->cipher.key.data;
+       fsattr_pa(&cipher_key) = 0;
+       fsattr_sz(&cipher_key) = sess->cipher.key.length;
+
+       fsattr_va(&auth_key) = sess->auth.key.data;
+       fsattr_pa(&auth_key) = 0;
+       fsattr_sz(&auth_key) = sess->auth.key.length;
+
+       fsattr_va(&mac) = op->sym->auth.digest.data;
+       fsattr_pa(&mac) = op->sym->auth.digest.phys_addr;
+       fsattr_sz(&mac) = sess->auth.digest_length;
+
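+       /* Bytes covered by auth but not by cipher are treated as AAD */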
+       aad_size = sym_op->auth.data.length - sym_op->cipher.data.length;
+
+       if (aad_size > 0) {
+               fsattr_sz(&aad) = aad_size;
+               fsattr_va(&aad) = rte_pktmbuf_mtod_offset
+                                               (mbuf_src,
+                                                uint8_t *,
+                                               sym_op->auth.data.offset);
+               fsattr_pa(&aad) = rte_pktmbuf_iova_offset(mbuf_src,
+                                               sym_op->auth.data.offset);
+       }
+
+       rc = bcmfs_crypto_build_chain_request(req, sess->cipher.algo,
+                                             sess->cipher.op,
+                                             sess->auth.algo,
+                                             sess->auth.op,
+                                             &src, &dst, &cipher_key,
+                                             &auth_key, &iv,
+                                             (aad_size > 0) ? (&aad) : NULL,
+                                             &mac, sess->cipher_first);
+
+       if (rc)
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+       return rc;
+}
+
+/** Process AEAD operation */
+static int
+process_crypto_aead_op(struct rte_crypto_op *op,
+                      struct rte_mbuf *mbuf_src,
+                      struct rte_mbuf *mbuf_dst,
+                      struct bcmfs_sym_session *sess,
+                      struct bcmfs_sym_request *req)
+{
+       int rc = 0;
+       struct fsattr src, dst, iv;
+       struct rte_crypto_sym_op *sym_op = op->sym;
+       struct fsattr key, aad, mac;
+
+       fsattr_sz(&src) = sym_op->aead.data.length;
+       fsattr_sz(&dst) = sym_op->aead.data.length;
+
+       fsattr_va(&src) = rte_pktmbuf_mtod_offset(mbuf_src,
+                                                 uint8_t *,
+                                                 sym_op->aead.data.offset);
+
+       fsattr_va(&dst) = rte_pktmbuf_mtod_offset(mbuf_dst,
+                                                 uint8_t *,
+                                                 sym_op->aead.data.offset);
+
+       fsattr_pa(&src) = rte_pktmbuf_iova_offset(mbuf_src,
+                                                 sym_op->aead.data.offset);
+       fsattr_pa(&dst) = rte_pktmbuf_iova_offset(mbuf_dst,
+                                                 sym_op->aead.data.offset);
+
+       fsattr_va(&iv) = rte_crypto_op_ctod_offset(op,
+                                                  uint8_t *,
+                                                  sess->aead.iv.offset);
+
+       fsattr_pa(&iv) = 0;
+       fsattr_sz(&iv) = sess->aead.iv.length;
+
+       fsattr_va(&key) = sess->aead.key.data;
+       fsattr_pa(&key) = 0;
+       fsattr_sz(&key) = sess->aead.key.length;
+
+       fsattr_va(&mac) = op->sym->aead.digest.data;
+       fsattr_pa(&mac) = op->sym->aead.digest.phys_addr;
+       fsattr_sz(&mac) = sess->aead.digest_length;
+
+       fsattr_va(&aad) = op->sym->aead.aad.data;
+       fsattr_pa(&aad) = op->sym->aead.aad.phys_addr;
+       fsattr_sz(&aad) = sess->aead.aad_length;
+
+       rc = bcmfs_crypto_build_aead_request(req, sess->aead.algo,
+                                            sess->aead.op, &src, &dst,
+                                            &key, &iv, &aad, &mac);
+
+       if (rc)
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+       return rc;
+}
+
+/** Process crypto operation for mbuf */
+int
+bcmfs_process_sym_crypto_op(struct rte_crypto_op *op,
+                           struct bcmfs_sym_session *sess,
+                           struct bcmfs_sym_request *req)
+{
+       struct rte_mbuf *msrc, *mdst;
+       int rc = 0;
+
+       msrc = op->sym->m_src;
+       mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+       op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+       switch (sess->chain_order) {
+       case BCMFS_SYM_CHAIN_ONLY_CIPHER:
+               rc = process_crypto_cipher_op(op, msrc, mdst, sess, req);
+               break;
+       case BCMFS_SYM_CHAIN_ONLY_AUTH:
+               rc = process_crypto_auth_op(op, msrc, sess, req);
+               break;
+       case BCMFS_SYM_CHAIN_CIPHER_AUTH:
+       case BCMFS_SYM_CHAIN_AUTH_CIPHER:
+               rc = process_crypto_combined_op(op, msrc, mdst, sess, req);
+               break;
+       case BCMFS_SYM_CHAIN_AEAD:
+               rc = process_crypto_aead_op(op, msrc, mdst, sess, req);
+               break;
+       default:
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+               break;
+       }
+
+       return rc;
+}
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_engine.c b/drivers/crypto/bcmfs/bcmfs_sym_engine.c
new file mode 100644 (file)
index 0000000..537bfbe
--- /dev/null
@@ -0,0 +1,1155 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Broadcom.
+ * All rights reserved.
+ */
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_crypto_sym.h>
+
+#include "bcmfs_logs.h"
+#include "bcmfs_sym_defs.h"
+#include "bcmfs_dev_msg.h"
+#include "bcmfs_sym_req.h"
+#include "bcmfs_sym_engine.h"
+
+enum spu2_cipher_type {
+       SPU2_CIPHER_TYPE_NONE = 0x0,
+       SPU2_CIPHER_TYPE_AES128 = 0x1,
+       SPU2_CIPHER_TYPE_AES192 = 0x2,
+       SPU2_CIPHER_TYPE_AES256 = 0x3,
+       SPU2_CIPHER_TYPE_DES = 0x4,
+       SPU2_CIPHER_TYPE_3DES = 0x5,
+       SPU2_CIPHER_TYPE_LAST
+};
+
+enum spu2_cipher_mode {
+       SPU2_CIPHER_MODE_ECB = 0x0,
+       SPU2_CIPHER_MODE_CBC = 0x1,
+       SPU2_CIPHER_MODE_CTR = 0x2,
+       SPU2_CIPHER_MODE_CFB = 0x3,
+       SPU2_CIPHER_MODE_OFB = 0x4,
+       SPU2_CIPHER_MODE_XTS = 0x5,
+       SPU2_CIPHER_MODE_CCM = 0x6,
+       SPU2_CIPHER_MODE_GCM = 0x7,
+       SPU2_CIPHER_MODE_LAST
+};
+
+enum spu2_hash_type {
+       SPU2_HASH_TYPE_NONE = 0x0,
+       SPU2_HASH_TYPE_AES128 = 0x1,
+       SPU2_HASH_TYPE_AES192 = 0x2,
+       SPU2_HASH_TYPE_AES256 = 0x3,
+       SPU2_HASH_TYPE_MD5 = 0x6,
+       SPU2_HASH_TYPE_SHA1 = 0x7,
+       SPU2_HASH_TYPE_SHA224 = 0x8,
+       SPU2_HASH_TYPE_SHA256 = 0x9,
+       SPU2_HASH_TYPE_SHA384 = 0xa,
+       SPU2_HASH_TYPE_SHA512 = 0xb,
+       SPU2_HASH_TYPE_SHA512_224 = 0xc,
+       SPU2_HASH_TYPE_SHA512_256 = 0xd,
+       SPU2_HASH_TYPE_SHA3_224 = 0xe,
+       SPU2_HASH_TYPE_SHA3_256 = 0xf,
+       SPU2_HASH_TYPE_SHA3_384 = 0x10,
+       SPU2_HASH_TYPE_SHA3_512 = 0x11,
+       SPU2_HASH_TYPE_LAST
+};
+
+enum spu2_hash_mode {
+       SPU2_HASH_MODE_CMAC = 0x0,
+       SPU2_HASH_MODE_CBC_MAC = 0x1,
+       SPU2_HASH_MODE_XCBC_MAC = 0x2,
+       SPU2_HASH_MODE_HMAC = 0x3,
+       SPU2_HASH_MODE_RABIN = 0x4,
+       SPU2_HASH_MODE_CCM = 0x5,
+       SPU2_HASH_MODE_GCM = 0x6,
+       SPU2_HASH_MODE_RESERVED = 0x7,
+       SPU2_HASH_MODE_LAST
+};
+
+enum spu2_proto_sel {
+       SPU2_PROTO_RESV = 0,
+       SPU2_MACSEC_SECTAG8_ECB = 1,
+       SPU2_MACSEC_SECTAG8_SCB = 2,
+       SPU2_MACSEC_SECTAG16 = 3,
+       SPU2_MACSEC_SECTAG16_8_XPN = 4,
+       SPU2_IPSEC = 5,
+       SPU2_IPSEC_ESN = 6,
+       SPU2_TLS_CIPHER = 7,
+       SPU2_TLS_AEAD = 8,
+       SPU2_DTLS_CIPHER = 9,
+       SPU2_DTLS_AEAD = 10
+};
+
+/* SPU2 response size */
+#define SPU2_STATUS_LEN                        2
+
+/* Metadata settings in response */
+enum spu2_ret_md_opts {
+       SPU2_RET_NO_MD = 0,             /* return no metadata */
+       SPU2_RET_FMD_OMD = 1,           /* return both FMD and OMD */
+       SPU2_RET_FMD_ONLY = 2,          /* return only FMD */
+       SPU2_RET_FMD_OMD_IV = 3,        /* return FMD and OMD with just IVs */
+};
+
+/* FMD ctrl0 field masks */
+#define SPU2_CIPH_ENCRYPT_EN            0x1 /* 0: decrypt, 1: encrypt */
+#define SPU2_CIPH_TYPE_SHIFT              4
+#define SPU2_CIPH_MODE                0xF00 /* one of spu2_cipher_mode */
+#define SPU2_CIPH_MODE_SHIFT              8
+#define SPU2_CFB_MASK                0x7000 /* cipher feedback mask */
+#define SPU2_CFB_MASK_SHIFT              12
+#define SPU2_PROTO_SEL             0xF00000 /* MACsec, IPsec, TLS... */
+#define SPU2_PROTO_SEL_SHIFT             20
+#define SPU2_HASH_FIRST           0x1000000 /* 1: hash input is input pkt
+                                            * data
+                                            */
+#define SPU2_CHK_TAG              0x2000000 /* 1: check digest provided */
+#define SPU2_HASH_TYPE          0x1F0000000 /* one of spu2_hash_type */
+#define SPU2_HASH_TYPE_SHIFT             28
+#define SPU2_HASH_MODE         0xF000000000 /* one of spu2_hash_mode */
+#define SPU2_HASH_MODE_SHIFT             36
+#define SPU2_CIPH_PAD_EN     0x100000000000 /* 1: Add pad to end of payload for
+                                            *    enc
+                                            */
+#define SPU2_CIPH_PAD      0xFF000000000000 /* cipher pad value */
+#define SPU2_CIPH_PAD_SHIFT              48
+
+/* FMD ctrl1 field masks */
+#define SPU2_TAG_LOC                    0x1 /* 1: end of payload, 0: undef */
+#define SPU2_HAS_FR_DATA                0x2 /* 1: msg has frame data */
+#define SPU2_HAS_AAD1                   0x4 /* 1: msg has AAD1 field */
+#define SPU2_HAS_NAAD                   0x8 /* 1: msg has NAAD field */
+#define SPU2_HAS_AAD2                  0x10 /* 1: msg has AAD2 field */
+#define SPU2_HAS_ESN                   0x20 /* 1: msg has ESN field */
+#define SPU2_HASH_KEY_LEN            0xFF00 /* len of hash key in bytes.
+                                            * HMAC only.
+                                            */
+#define SPU2_HASH_KEY_LEN_SHIFT           8
+#define SPU2_CIPH_KEY_LEN         0xFF00000 /* len of cipher key in bytes */
+#define SPU2_CIPH_KEY_LEN_SHIFT          20
+#define SPU2_GENIV               0x10000000 /* 1: hw generates IV */
+#define SPU2_HASH_IV             0x20000000 /* 1: IV incl in hash */
+#define SPU2_RET_IV              0x40000000 /* 1: return IV in output msg
+                                            *    b4 payload
+                                            */
+#define SPU2_RET_IV_LEN         0xF00000000 /* length in bytes of IV returned.
+                                            * 0 = 16 bytes
+                                            */
+#define SPU2_RET_IV_LEN_SHIFT            32
+#define SPU2_IV_OFFSET         0xF000000000 /* gen IV offset */
+#define SPU2_IV_OFFSET_SHIFT             36
+#define SPU2_IV_LEN          0x1F0000000000 /* length of input IV in bytes */
+#define SPU2_IV_LEN_SHIFT                40
+#define SPU2_HASH_TAG_LEN  0x7F000000000000 /* hash tag length in bytes */
+#define SPU2_HASH_TAG_LEN_SHIFT          48
+#define SPU2_RETURN_MD    0x300000000000000 /* return metadata */
+#define SPU2_RETURN_MD_SHIFT             56
+#define SPU2_RETURN_FD    0x400000000000000
+#define SPU2_RETURN_AAD1  0x800000000000000
+#define SPU2_RETURN_NAAD 0x1000000000000000
+#define SPU2_RETURN_AAD2 0x2000000000000000
+#define SPU2_RETURN_PAY  0x4000000000000000 /* return payload */
+
+/* FMD ctrl2 field masks */
+#define SPU2_AAD1_OFFSET              0xFFF /* byte offset of AAD1 field */
+#define SPU2_AAD1_LEN               0xFF000 /* length of AAD1 in bytes */
+#define SPU2_AAD1_LEN_SHIFT              12
+#define SPU2_AAD2_OFFSET         0xFFF00000 /* byte offset of AAD2 field */
+#define SPU2_AAD2_OFFSET_SHIFT           20
+#define SPU2_PL_OFFSET   0xFFFFFFFF00000000 /* payload offset from AAD2 */
+#define SPU2_PL_OFFSET_SHIFT             32
+
+/* FMD ctrl3 field masks */
+#define SPU2_PL_LEN              0xFFFFFFFF /* payload length in bytes */
+#define SPU2_TLS_LEN         0xFFFF00000000 /* TLS encrypt: cipher len
+                                            * TLS decrypt: compressed len
+                                            */
+#define SPU2_TLS_LEN_SHIFT               32
+
+/*
+ * Max value that can be represented in the Payload Length field of the
+ * ctrl3 word of FMD.
+ */
+#define SPU2_MAX_PAYLOAD  SPU2_PL_LEN
+
+#define SPU2_VAL_NONE  0
+
+/* CCM B_0 field definitions, common for SPU-M and SPU2 */
+#define CCM_B0_ADATA           0x40
+#define CCM_B0_ADATA_SHIFT        6
+#define CCM_B0_M_PRIME         0x38
+#define CCM_B0_M_PRIME_SHIFT      3
+#define CCM_B0_L_PRIME         0x07
+#define CCM_B0_L_PRIME_SHIFT      0
+#define CCM_ESP_L_VALUE                   4
+
+static int
+spu2_cipher_type_xlate(enum rte_crypto_cipher_algorithm cipher_alg,
+                      enum spu2_cipher_type *spu2_type,
+                      struct fsattr *key)
+{
+       int ret = 0;
+       int key_size = fsattr_sz(key);
+
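+       /* AES-XTS keys are two concatenated sub-keys; classify by one half */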
+       if (cipher_alg == RTE_CRYPTO_CIPHER_AES_XTS)
+               key_size = key_size / 2;
+
+       switch (key_size) {
+       case BCMFS_CRYPTO_AES128:
+               *spu2_type = SPU2_CIPHER_TYPE_AES128;
+               break;
+       case BCMFS_CRYPTO_AES192:
+               *spu2_type = SPU2_CIPHER_TYPE_AES192;
+               break;
+       case BCMFS_CRYPTO_AES256:
+               *spu2_type = SPU2_CIPHER_TYPE_AES256;
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static int
+spu2_hash_xlate(enum rte_crypto_auth_algorithm auth_alg,
+               struct fsattr *key,
+               enum spu2_hash_type *spu2_type,
+               enum spu2_hash_mode *spu2_mode)
+{
+       *spu2_mode = 0;
+
+       switch (auth_alg) {
+       case RTE_CRYPTO_AUTH_NULL:
+               *spu2_type = SPU2_HASH_TYPE_NONE;
+               break;
+       case RTE_CRYPTO_AUTH_MD5:
+               *spu2_type = SPU2_HASH_TYPE_MD5;
+               break;
+       case RTE_CRYPTO_AUTH_MD5_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_MD5;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA1:
+               *spu2_type = SPU2_HASH_TYPE_SHA1;
+               break;
+       case RTE_CRYPTO_AUTH_SHA1_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA1;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA224:
+               *spu2_type = SPU2_HASH_TYPE_SHA224;
+               break;
+       case RTE_CRYPTO_AUTH_SHA224_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA224;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA256:
+               *spu2_type = SPU2_HASH_TYPE_SHA256;
+               break;
+       case RTE_CRYPTO_AUTH_SHA256_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA256;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA384:
+               *spu2_type = SPU2_HASH_TYPE_SHA384;
+               break;
+       case RTE_CRYPTO_AUTH_SHA384_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA384;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA512:
+               *spu2_type = SPU2_HASH_TYPE_SHA512;
+               break;
+       case RTE_CRYPTO_AUTH_SHA512_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA512;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_224:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_224;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_224;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_256:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_256;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_256;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_384:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_384;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_384;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_512:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_512;
+               break;
+       case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_512;
+               *spu2_mode = SPU2_HASH_MODE_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+               *spu2_mode = SPU2_HASH_MODE_XCBC_MAC;
+               switch (fsattr_sz(key)) {
+               case BCMFS_CRYPTO_AES128:
+                       *spu2_type = SPU2_HASH_TYPE_AES128;
+                       break;
+               case BCMFS_CRYPTO_AES192:
+                       *spu2_type = SPU2_HASH_TYPE_AES192;
+                       break;
+               case BCMFS_CRYPTO_AES256:
+                       *spu2_type = SPU2_HASH_TYPE_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case RTE_CRYPTO_AUTH_AES_CMAC:
+               *spu2_mode = SPU2_HASH_MODE_CMAC;
+               switch (fsattr_sz(key)) {
+               case BCMFS_CRYPTO_AES128:
+                       *spu2_type = SPU2_HASH_TYPE_AES128;
+                       break;
+               case BCMFS_CRYPTO_AES192:
+                       *spu2_type = SPU2_HASH_TYPE_AES192;
+                       break;
+               case BCMFS_CRYPTO_AES256:
+                       *spu2_type = SPU2_HASH_TYPE_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case RTE_CRYPTO_AUTH_AES_GMAC:
+               *spu2_mode = SPU2_HASH_MODE_GCM;
+               switch (fsattr_sz(key)) {
+               case BCMFS_CRYPTO_AES128:
+                       *spu2_type = SPU2_HASH_TYPE_AES128;
+                       break;
+               case BCMFS_CRYPTO_AES192:
+                       *spu2_type = SPU2_HASH_TYPE_AES192;
+                       break;
+               case BCMFS_CRYPTO_AES256:
+                       *spu2_type = SPU2_HASH_TYPE_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+               *spu2_mode = SPU2_HASH_MODE_CBC_MAC;
+               switch (fsattr_sz(key)) {
+               case BCMFS_CRYPTO_AES128:
+                       *spu2_type = SPU2_HASH_TYPE_AES128;
+                       break;
+               case BCMFS_CRYPTO_AES192:
+                       *spu2_type = SPU2_HASH_TYPE_AES192;
+                       break;
+               case BCMFS_CRYPTO_AES256:
+                       *spu2_type = SPU2_HASH_TYPE_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+spu2_cipher_xlate(enum rte_crypto_cipher_algorithm cipher_alg,
+                 struct fsattr *key,
+                 enum spu2_cipher_type *spu2_type,
+                 enum spu2_cipher_mode *spu2_mode)
+{
+       int ret = 0;
+
+       switch (cipher_alg) {
+       case RTE_CRYPTO_CIPHER_NULL:
+               *spu2_type = SPU2_CIPHER_TYPE_NONE;
+               break;
+       case RTE_CRYPTO_CIPHER_DES_CBC:
+               *spu2_mode =  SPU2_CIPHER_MODE_CBC;
+               *spu2_type = SPU2_CIPHER_TYPE_DES;
+               break;
+       case RTE_CRYPTO_CIPHER_3DES_ECB:
+               *spu2_mode =  SPU2_CIPHER_MODE_ECB;
+               *spu2_type = SPU2_CIPHER_TYPE_3DES;
+               break;
+       case RTE_CRYPTO_CIPHER_3DES_CBC:
+               *spu2_mode =  SPU2_CIPHER_MODE_CBC;
+               *spu2_type = SPU2_CIPHER_TYPE_3DES;
+               break;
+       case RTE_CRYPTO_CIPHER_AES_CBC:
+               *spu2_mode =  SPU2_CIPHER_MODE_CBC;
+               ret = spu2_cipher_type_xlate(cipher_alg, spu2_type, key);
+               break;
+       case RTE_CRYPTO_CIPHER_AES_ECB:
+               *spu2_mode =  SPU2_CIPHER_MODE_ECB;
+               ret = spu2_cipher_type_xlate(cipher_alg, spu2_type, key);
+               break;
+       case RTE_CRYPTO_CIPHER_AES_CTR:
+               *spu2_mode =  SPU2_CIPHER_MODE_CTR;
+               ret = spu2_cipher_type_xlate(cipher_alg, spu2_type, key);
+               break;
+       case RTE_CRYPTO_CIPHER_AES_XTS:
+               *spu2_mode =  SPU2_CIPHER_MODE_XTS;
+               ret = spu2_cipher_type_xlate(cipher_alg, spu2_type, key);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+static void
+spu2_fmd_ctrl0_write(struct spu2_fmd *fmd,
+                    bool is_inbound, bool auth_first,
+                    enum spu2_proto_sel protocol,
+                    enum spu2_cipher_type cipher_type,
+                    enum spu2_cipher_mode cipher_mode,
+                    enum spu2_hash_type auth_type,
+                    enum spu2_hash_mode auth_mode)
+{
+       uint64_t ctrl0 = 0;
+
+       if (cipher_type != SPU2_CIPHER_TYPE_NONE && !is_inbound)
+               ctrl0 |= SPU2_CIPH_ENCRYPT_EN;
+
+       ctrl0 |= ((uint64_t)cipher_type << SPU2_CIPH_TYPE_SHIFT) |
+                 ((uint64_t)cipher_mode << SPU2_CIPH_MODE_SHIFT);
+
+       if (protocol != SPU2_PROTO_RESV)
+               ctrl0 |= (uint64_t)protocol << SPU2_PROTO_SEL_SHIFT;
+
+       if (auth_first)
+               ctrl0 |= SPU2_HASH_FIRST;
+
+       if (is_inbound && auth_type != SPU2_HASH_TYPE_NONE)
+               ctrl0 |= SPU2_CHK_TAG;
+
+       ctrl0 |= (((uint64_t)auth_type << SPU2_HASH_TYPE_SHIFT) |
+                 ((uint64_t)auth_mode << SPU2_HASH_MODE_SHIFT));
+
+       fmd->ctrl0 = ctrl0;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       BCMFS_DP_HEXDUMP_LOG(DEBUG, "ctrl0:", &fmd->ctrl0, sizeof(uint64_t));
+#endif
+}
+
+static void
+spu2_fmd_ctrl1_write(struct spu2_fmd *fmd, bool is_inbound,
+                    uint64_t assoc_size, uint64_t auth_key_len,
+                    uint64_t cipher_key_len, bool gen_iv, bool hash_iv,
+                    bool return_iv, uint64_t ret_iv_len,
+                    uint64_t ret_iv_offset, uint64_t cipher_iv_len,
+                    uint64_t digest_size, bool return_payload, bool return_md)
+{
+       uint64_t ctrl1 = 0;
+
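+       /* Inbound: the digest to check sits at the end of the payload */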
+       if (is_inbound && digest_size != 0)
+               ctrl1 |= SPU2_TAG_LOC;
+
+       if (assoc_size != 0)
+               ctrl1 |= SPU2_HAS_AAD2;
+
+       if (auth_key_len != 0)
+               ctrl1 |= ((auth_key_len << SPU2_HASH_KEY_LEN_SHIFT) &
+                         SPU2_HASH_KEY_LEN);
+
+       if (cipher_key_len != 0)
+               ctrl1 |= ((cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) &
+                         SPU2_CIPH_KEY_LEN);
+
+       if (gen_iv)
+               ctrl1 |= SPU2_GENIV;
+
+       if (hash_iv)
+               ctrl1 |= SPU2_HASH_IV;
+
+       if (return_iv) {
+               ctrl1 |= SPU2_RET_IV;
+               ctrl1 |= ret_iv_len << SPU2_RET_IV_LEN_SHIFT;
+               ctrl1 |= ret_iv_offset << SPU2_IV_OFFSET_SHIFT;
+       }
+
+       ctrl1 |= ((cipher_iv_len << SPU2_IV_LEN_SHIFT) & SPU2_IV_LEN);
+
+       if (digest_size != 0) {
+               ctrl1 |= ((digest_size << SPU2_HASH_TAG_LEN_SHIFT) &
+                         SPU2_HASH_TAG_LEN);
+       }
+
+       /*
+        * Ask for the output pkt to include FMD; there is no need to
+        * get keys and IVs back in OMD.
+        */
+       if (return_md)
+               ctrl1 |= ((uint64_t)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT);
+       else
+               ctrl1 |= ((uint64_t)SPU2_RET_NO_MD << SPU2_RETURN_MD_SHIFT);
+
+       /* Crypto API does not get assoc data back. So no need for AAD2. */
+
+       if (return_payload)
+               ctrl1 |= SPU2_RETURN_PAY;
+
+       fmd->ctrl1 = ctrl1;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       BCMFS_DP_HEXDUMP_LOG(DEBUG, "ctrl1:", &fmd->ctrl1, sizeof(uint64_t));
+#endif
+}
+
+static void
+spu2_fmd_ctrl2_write(struct spu2_fmd *fmd, uint64_t cipher_offset,
+                    uint64_t auth_key_len __rte_unused,
+                    uint64_t auth_iv_len  __rte_unused,
+                    uint64_t cipher_key_len  __rte_unused,
+                    uint64_t cipher_iv_len  __rte_unused)
+{
+       uint64_t aad1_offset;
+       uint64_t aad2_offset;
+       uint16_t aad1_len = 0;
+       uint64_t payload_offset;
+
+       /* AAD1 offset is from start of FD. FD length always 0. */
+       aad1_offset = 0;
+
+       aad2_offset = aad1_offset;
+       payload_offset = cipher_offset;
+       fmd->ctrl2 = aad1_offset |
+                    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
+                    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
+                    (payload_offset << SPU2_PL_OFFSET_SHIFT);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       BCMFS_DP_HEXDUMP_LOG(DEBUG, "ctrl2:", &fmd->ctrl2, sizeof(uint64_t));
+#endif
+}
+
+static void
+spu2_fmd_ctrl3_write(struct spu2_fmd *fmd, uint64_t payload_len)
+{
+       fmd->ctrl3 = payload_len & SPU2_PL_LEN;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       BCMFS_DP_HEXDUMP_LOG(DEBUG, "ctrl3:", &fmd->ctrl3, sizeof(uint64_t));
+#endif
+}
+
+int
+bcmfs_crypto_build_auth_req(struct bcmfs_sym_request *sreq,
+                           enum rte_crypto_auth_algorithm a_alg,
+                           enum rte_crypto_auth_operation auth_op,
+                           struct fsattr *src, struct fsattr *dst,
+                           struct fsattr *mac, struct fsattr *auth_key,
+                           struct fsattr *iv)
+{
+       int ret;
+       uint64_t dst_size;
+       int src_index = 0;
+       struct spu2_fmd *fmd;
+       uint64_t payload_len;
+       enum spu2_hash_mode spu2_auth_mode;
+       enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
+       uint64_t iv_size = (iv != NULL) ? fsattr_sz(iv) : 0;
+       uint64_t auth_ksize = (auth_key != NULL) ? fsattr_sz(auth_key) : 0;
+       bool is_inbound = (auth_op == RTE_CRYPTO_AUTH_OP_VERIFY);
+
+       if (src == NULL)
+               return -EINVAL;
+
+       payload_len = fsattr_sz(src);
+       if (!payload_len) {
+               BCMFS_DP_LOG(ERR, "null payload not supported");
+               return -EINVAL;
+       }
+
+       /* one of dst or mac should not be NULL */
+       if (dst == NULL && mac == NULL)
+               return -EINVAL;
+
+       if (auth_op == RTE_CRYPTO_AUTH_OP_GENERATE && dst != NULL)
+               dst_size = fsattr_sz(dst);
+       else if (auth_op == RTE_CRYPTO_AUTH_OP_VERIFY && mac != NULL)
+               dst_size = fsattr_sz(mac);
+       else
+               return -EINVAL;
+
+       /* spu2 hash algorithm and hash algorithm mode */
+       ret = spu2_hash_xlate(a_alg, auth_key, &spu2_auth_type,
+                             &spu2_auth_mode);
+       if (ret)
+               return -EINVAL;
+
+       fmd  = &sreq->fmd;
+
+       spu2_fmd_ctrl0_write(fmd, is_inbound, SPU2_VAL_NONE,
+                            SPU2_PROTO_RESV, SPU2_VAL_NONE,
+                            SPU2_VAL_NONE, spu2_auth_type, spu2_auth_mode);
+
+       spu2_fmd_ctrl1_write(fmd, is_inbound, SPU2_VAL_NONE,
+                            auth_ksize, SPU2_VAL_NONE, false,
+                            false, SPU2_VAL_NONE, SPU2_VAL_NONE,
+                            SPU2_VAL_NONE, iv_size,
+                            dst_size, SPU2_VAL_NONE, SPU2_VAL_NONE);
+
+       memset(&fmd->ctrl2, 0, sizeof(uint64_t));
+
+       spu2_fmd_ctrl3_write(fmd, fsattr_sz(src));
+
+       /* Source metadata and data pointers */
+       sreq->msgs.srcs_addr[src_index] = sreq->fptr;
+       sreq->msgs.srcs_len[src_index] = sizeof(struct spu2_fmd);
+       src_index++;
+
+       if (auth_key != NULL && fsattr_sz(auth_key) != 0) {
+               memcpy(sreq->auth_key, fsattr_va(auth_key),
+                      fsattr_sz(auth_key));
+
+               sreq->msgs.srcs_addr[src_index] = sreq->aptr;
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(auth_key);
+               src_index++;
+       }
+
+       if (iv != NULL && fsattr_sz(iv) != 0) {
+               memcpy(sreq->iv, fsattr_va(iv), fsattr_sz(iv));
+               sreq->msgs.srcs_addr[src_index] = sreq->iptr;
+               sreq->msgs.srcs_len[src_index] = iv_size;
+               src_index++;
+       }
+
+       sreq->msgs.srcs_addr[src_index] = fsattr_pa(src);
+       sreq->msgs.srcs_len[src_index] = fsattr_sz(src);
+       src_index++;
+
+       /*
+        * For authentication verify operations, pass the input mac data
+        * to the SPU2 engine.
+        */
+       if (auth_op == RTE_CRYPTO_AUTH_OP_VERIFY && mac != NULL) {
+               sreq->msgs.srcs_addr[src_index] = fsattr_pa(mac);
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(mac);
+               src_index++;
+       }
+       sreq->msgs.srcs_count = src_index;
+
+       /*
+        * The output packet contains the actual output from SPU2 and
+        * the status packet, so dsts_count below is always 2.
+        */
+       if (auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+               sreq->msgs.dsts_addr[0] = fsattr_pa(dst);
+               sreq->msgs.dsts_len[0] = fsattr_sz(dst);
+       } else {
+               /*
+                * For authentication verify operations, provide a dummy
+                * location for the SPU2 engine to write the hash. This is
+                * needed because SPU2 generates the hash even on verify.
+                */
+               sreq->msgs.dsts_addr[0] = sreq->dptr;
+               sreq->msgs.dsts_len[0] = fsattr_sz(mac);
+       }
+
+       sreq->msgs.dsts_addr[1] = sreq->rptr;
+       sreq->msgs.dsts_len[1] = SPU2_STATUS_LEN;
+       sreq->msgs.dsts_count = 2;
+
+       return 0;
+}
+
+int
+bcmfs_crypto_build_cipher_req(struct bcmfs_sym_request *sreq,
+                             enum rte_crypto_cipher_algorithm calgo,
+                             enum rte_crypto_cipher_operation cipher_op,
+                             struct fsattr *src, struct fsattr *dst,
+                             struct fsattr *cipher_key, struct fsattr *iv)
+{
+       int ret = 0;
+       int src_index = 0;
+       struct spu2_fmd *fmd;
+       unsigned int xts_keylen;
+       enum spu2_cipher_mode spu2_ciph_mode = 0;
+       enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
+       bool is_inbound = (cipher_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+       if (src == NULL || dst == NULL || iv == NULL)
+               return -EINVAL;
+
+       fmd  = &sreq->fmd;
+
+       /* spu2 cipher algorithm and cipher algorithm mode */
+       ret = spu2_cipher_xlate(calgo, cipher_key,
+                               &spu2_ciph_type, &spu2_ciph_mode);
+       if (ret)
+               return -EINVAL;
+
+       spu2_fmd_ctrl0_write(fmd, is_inbound, SPU2_VAL_NONE,
+                            SPU2_PROTO_RESV, spu2_ciph_type, spu2_ciph_mode,
+                            SPU2_VAL_NONE, SPU2_VAL_NONE);
+
+       spu2_fmd_ctrl1_write(fmd, SPU2_VAL_NONE, SPU2_VAL_NONE, SPU2_VAL_NONE,
+                            fsattr_sz(cipher_key), false, false,
+                            SPU2_VAL_NONE, SPU2_VAL_NONE, SPU2_VAL_NONE,
+                            fsattr_sz(iv), SPU2_VAL_NONE, SPU2_VAL_NONE,
+                            SPU2_VAL_NONE);
+
+       /* Nothing for FMD2 */
+       memset(&fmd->ctrl2, 0, sizeof(uint64_t));
+
+       spu2_fmd_ctrl3_write(fmd, fsattr_sz(src));
+
+       /* Source metadata and data pointers */
+       sreq->msgs.srcs_addr[src_index] = sreq->fptr;
+       sreq->msgs.srcs_len[src_index] = sizeof(struct spu2_fmd);
+       src_index++;
+
+       if (cipher_key != NULL && fsattr_sz(cipher_key) != 0) {
+               if (calgo == RTE_CRYPTO_CIPHER_AES_XTS) {
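+                       /* swap the two XTS key halves before programming HW */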
+                       xts_keylen = fsattr_sz(cipher_key) / 2;
+                       memcpy(sreq->cipher_key,
+                              (uint8_t *)fsattr_va(cipher_key) + xts_keylen,
+                              xts_keylen);
+                       memcpy(sreq->cipher_key + xts_keylen,
+                              fsattr_va(cipher_key), xts_keylen);
+               } else {
+                       memcpy(sreq->cipher_key,
+                               fsattr_va(cipher_key), fsattr_sz(cipher_key));
+               }
+
+               sreq->msgs.srcs_addr[src_index] = sreq->cptr;
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(cipher_key);
+               src_index++;
+       }
+
+       if (iv != NULL && fsattr_sz(iv) != 0) {
+               memcpy(sreq->iv,
+                       fsattr_va(iv), fsattr_sz(iv));
+               sreq->msgs.srcs_addr[src_index] = sreq->iptr;
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(iv);
+               src_index++;
+       }
+
+       sreq->msgs.srcs_addr[src_index] = fsattr_pa(src);
+       sreq->msgs.srcs_len[src_index] = fsattr_sz(src);
+       src_index++;
+       sreq->msgs.srcs_count = src_index;
+
+       /*
+        * The output packet contains the actual output from SPU2 and
+        * the status packet, so dsts_count below is always 2.
+        */
+       sreq->msgs.dsts_addr[0] = fsattr_pa(dst);
+       sreq->msgs.dsts_len[0] = fsattr_sz(dst);
+
+       sreq->msgs.dsts_addr[1] = sreq->rptr;
+       sreq->msgs.dsts_len[1] = SPU2_STATUS_LEN;
+       sreq->msgs.dsts_count = 2;
+
+       return 0;
+}
+
+int
+bcmfs_crypto_build_chain_request(struct bcmfs_sym_request *sreq,
+                                enum rte_crypto_cipher_algorithm cipher_alg,
+                                enum rte_crypto_cipher_operation cipher_op __rte_unused,
+                                enum rte_crypto_auth_algorithm auth_alg,
+                                enum rte_crypto_auth_operation auth_op,
+                                struct fsattr *src, struct fsattr *dst,
+                                struct fsattr *cipher_key,
+                                struct fsattr *auth_key,
+                                struct fsattr *iv, struct fsattr *aad,
+                                struct fsattr *digest, bool cipher_first)
+{
+       int ret = 0;
+       int src_index = 0;
+       int dst_index = 0;
+       bool auth_first = 0;
+       struct spu2_fmd *fmd;
+       uint64_t payload_len;
+       enum spu2_cipher_mode spu2_ciph_mode = 0;
+       enum spu2_hash_mode spu2_auth_mode = 0;
+       uint64_t aad_size = (aad != NULL) ? fsattr_sz(aad) : 0;
+       uint64_t iv_size = (iv != NULL) ? fsattr_sz(iv) : 0;
+       enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
+       uint64_t auth_ksize = (auth_key != NULL) ?
+                               fsattr_sz(auth_key) : 0;
+       uint64_t cipher_ksize = (cipher_key != NULL) ?
+                                       fsattr_sz(cipher_key) : 0;
+       uint64_t digest_size = (digest != NULL) ?
+                                       fsattr_sz(digest) : 0;
+       enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
+       bool is_inbound = (auth_op == RTE_CRYPTO_AUTH_OP_VERIFY);
+
+       if (src == NULL)
+               return -EINVAL;
+
+       payload_len = fsattr_sz(src);
+       if (!payload_len) {
+               BCMFS_DP_LOG(ERR, "null payload not supported");
+               return -EINVAL;
+       }
+
+       /* spu2 hash algorithm and hash algorithm mode */
+       ret = spu2_hash_xlate(auth_alg, auth_key, &spu2_auth_type,
+                             &spu2_auth_mode);
+       if (ret)
+               return -EINVAL;
+
+       /* spu2 cipher algorithm and cipher algorithm mode */
+       ret = spu2_cipher_xlate(cipher_alg, cipher_key, &spu2_ciph_type,
+                               &spu2_ciph_mode);
+       if (ret) {
+               BCMFS_DP_LOG(ERR, "cipher xlate error");
+               return -EINVAL;
+       }
+
+       auth_first = cipher_first ? 0 : 1;
+
+       if (iv != NULL && fsattr_sz(iv) != 0)
+               memcpy(sreq->iv, fsattr_va(iv), fsattr_sz(iv));
+
+       fmd  = &sreq->fmd;
+
+       spu2_fmd_ctrl0_write(fmd, is_inbound, auth_first, SPU2_PROTO_RESV,
+                            spu2_ciph_type, spu2_ciph_mode,
+                            spu2_auth_type, spu2_auth_mode);
+
+       spu2_fmd_ctrl1_write(fmd, is_inbound, aad_size, auth_ksize,
+                            cipher_ksize, false, false, SPU2_VAL_NONE,
+                            SPU2_VAL_NONE, SPU2_VAL_NONE, iv_size,
+                            digest_size, false, SPU2_VAL_NONE);
+
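+       /* cipher_offset == aad_size: the ciphered payload follows the AAD */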
+       spu2_fmd_ctrl2_write(fmd, aad_size, auth_ksize, 0,
+                            cipher_ksize, iv_size);
+
+       spu2_fmd_ctrl3_write(fmd, payload_len);
+
+       /* Source metadata and data pointers */
+       sreq->msgs.srcs_addr[src_index] = sreq->fptr;
+       sreq->msgs.srcs_len[src_index] = sizeof(struct spu2_fmd);
+       src_index++;
+
+       if (auth_key != NULL && fsattr_sz(auth_key) != 0) {
+               memcpy(sreq->auth_key,
+                      fsattr_va(auth_key), fsattr_sz(auth_key));
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       BCMFS_DP_HEXDUMP_LOG(DEBUG, "auth key:", fsattr_va(auth_key),
+                            fsattr_sz(auth_key));
+#endif
+               sreq->msgs.srcs_addr[src_index] = sreq->aptr;
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(auth_key);
+               src_index++;
+       }
+
+       if (cipher_key != NULL && fsattr_sz(cipher_key) != 0) {
+               memcpy(sreq->cipher_key,
+                      fsattr_va(cipher_key), fsattr_sz(cipher_key));
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       BCMFS_DP_HEXDUMP_LOG(DEBUG, "cipher key:", fsattr_va(cipher_key),
+                            fsattr_sz(cipher_key));
+#endif
+               sreq->msgs.srcs_addr[src_index] = sreq->cptr;
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(cipher_key);
+               src_index++;
+       }
+
+       if (iv != NULL && fsattr_sz(iv) != 0) {
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+               BCMFS_DP_HEXDUMP_LOG(DEBUG, "iv key:", fsattr_va(iv),
+                                    fsattr_sz(iv));
+#endif
+               sreq->msgs.srcs_addr[src_index] = sreq->iptr;
+               sreq->msgs.srcs_len[src_index] = iv_size;
+               src_index++;
+       }
+
+       if (aad != NULL && fsattr_sz(aad) != 0) {
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+               BCMFS_DP_HEXDUMP_LOG(DEBUG, "aad :", fsattr_va(aad),
+                                    fsattr_sz(aad));
+#endif
+               sreq->msgs.srcs_addr[src_index] = fsattr_pa(aad);
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(aad);
+               src_index++;
+       }
+
+       sreq->msgs.srcs_addr[src_index] = fsattr_pa(src);
+       sreq->msgs.srcs_len[src_index] = fsattr_sz(src);
+       src_index++;
+
+       if (auth_op == RTE_CRYPTO_AUTH_OP_VERIFY && digest != NULL &&
+           fsattr_sz(digest) != 0) {
+               sreq->msgs.srcs_addr[src_index] = fsattr_pa(digest);
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(digest);
+               src_index++;
+       }
+       sreq->msgs.srcs_count = src_index;
+
+       if (dst != NULL) {
+               sreq->msgs.dsts_addr[dst_index] = fsattr_pa(dst);
+               sreq->msgs.dsts_len[dst_index] = fsattr_sz(dst);
+               dst_index++;
+       }
+
+       if (auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+               /*
+                * For verify operations the SPU2 engine still generates
+                * digest data, but the application does not need it.
+                * So program a dummy location to capture the digest
+                * data.
+                */
+               if (digest != NULL && fsattr_sz(digest) != 0) {
+                       sreq->msgs.dsts_addr[dst_index] =
+                               sreq->dptr;
+                       sreq->msgs.dsts_len[dst_index] =
+                               fsattr_sz(digest);
+                       dst_index++;
+               }
+       } else {
+               if (digest != NULL && fsattr_sz(digest) != 0) {
+                       sreq->msgs.dsts_addr[dst_index] =
+                               fsattr_pa(digest);
+                       sreq->msgs.dsts_len[dst_index] =
+                               fsattr_sz(digest);
+                       dst_index++;
+               }
+       }
+
+       sreq->msgs.dsts_addr[dst_index] = sreq->rptr;
+       sreq->msgs.dsts_len[dst_index] = SPU2_STATUS_LEN;
+       dst_index++;
+       sreq->msgs.dsts_count = dst_index;
+
+       return 0;
+}
+
+static void
+bcmfs_crypto_ccm_update_iv(uint8_t *ivbuf,
+                          unsigned int *ivlen, bool is_esp)
+{
+       int L;  /* size of length field, in bytes */
+
+       /*
+        * In RFC 4309 (ESP) mode, L is fixed at 4 bytes; otherwise the
+        * IV carries (L - 1) in the bottom 3 bits of its first byte,
+        * per RFC 3610.
+        */
+       if (is_esp)
+               L = CCM_ESP_L_VALUE;
+       else
+               L = ((ivbuf[0] & CCM_B0_L_PRIME) >>
+                     CCM_B0_L_PRIME_SHIFT) + 1;
+
+       /* SPU2 doesn't want these length bytes nor the first byte... */
+       *ivlen -= (1 + L);
+       memmove(ivbuf, &ivbuf[1], *ivlen);
+}
+
+int
+bcmfs_crypto_build_aead_request(struct bcmfs_sym_request *sreq,
+                               enum rte_crypto_aead_algorithm ae_algo,
+                               enum rte_crypto_aead_operation aeop,
+                               struct fsattr *src, struct fsattr *dst,
+                               struct fsattr *key, struct fsattr *iv,
+                               struct fsattr *aad, struct fsattr *digest)
+{
+       int src_index = 0;
+       int dst_index = 0;
+       bool auth_first = 0;
+       struct spu2_fmd *fmd;
+       uint64_t payload_len;
+       uint64_t aad_size = (aad != NULL) ? fsattr_sz(aad) : 0;
+       unsigned int iv_size = (iv != NULL) ? fsattr_sz(iv) : 0;
+       enum spu2_cipher_mode spu2_ciph_mode = 0;
+       enum spu2_hash_mode spu2_auth_mode = 0;
+       enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
+       enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
+       uint64_t ksize = (key != NULL) ? fsattr_sz(key) : 0;
+       uint64_t digest_size = (digest != NULL) ?
+                                       fsattr_sz(digest) : 0;
+       bool is_inbound = (aeop == RTE_CRYPTO_AEAD_OP_DECRYPT);
+
+       if (src == NULL)
+               return -EINVAL;
+
+       payload_len = fsattr_sz(src);
+       if (!payload_len) {
+               BCMFS_DP_LOG(ERR, "null payload not supported");
+               return -EINVAL;
+       }
+
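+       /* One AEAD key size selects both the cipher and the hash engine */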
+       switch (ksize) {
+       case BCMFS_CRYPTO_AES128:
+               spu2_auth_type = SPU2_HASH_TYPE_AES128;
+               spu2_ciph_type = SPU2_CIPHER_TYPE_AES128;
+               break;
+       case BCMFS_CRYPTO_AES192:
+               spu2_auth_type = SPU2_HASH_TYPE_AES192;
+               spu2_ciph_type = SPU2_CIPHER_TYPE_AES192;
+               break;
+       case BCMFS_CRYPTO_AES256:
+               spu2_auth_type = SPU2_HASH_TYPE_AES256;
+               spu2_ciph_type = SPU2_CIPHER_TYPE_AES256;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (ae_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+               spu2_auth_mode = SPU2_HASH_MODE_GCM;
+               spu2_ciph_mode = SPU2_CIPHER_MODE_GCM;
+               /*
+                * SPU2 needs 12 bytes of IV in total,
+                * i.e. an 8-byte IV (random number) plus 4 bytes of salt.
+                */
+               if (fsattr_sz(iv) > 12)
+                       iv_size = 12;
+
+               /*
+                * On SPU2, AES-GCM runs cipher first on encrypt and
+                * auth first on decrypt.
+                */
+
+               auth_first = (aeop == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+                               0 : 1;
+       }
+
+       if (iv != NULL && fsattr_sz(iv) != 0)
+               memcpy(sreq->iv, fsattr_va(iv), fsattr_sz(iv));
+
+       if (ae_algo == RTE_CRYPTO_AEAD_AES_CCM) {
+               spu2_auth_mode = SPU2_HASH_MODE_CCM;
+               spu2_ciph_mode = SPU2_CIPHER_MODE_CCM;
+               if (iv != NULL)  {
+                       memcpy(sreq->iv, fsattr_va(iv),
+                              fsattr_sz(iv));
+                       iv_size = fsattr_sz(iv);
+                       bcmfs_crypto_ccm_update_iv(sreq->iv, &iv_size, false);
+               }
+
+               /* opposite for ccm (auth 1st on encrypt) */
+               auth_first = (aeop == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+                             0 : 1;
+       }
+
+       fmd  = &sreq->fmd;
+
+       spu2_fmd_ctrl0_write(fmd, is_inbound, auth_first, SPU2_PROTO_RESV,
+                            spu2_ciph_type, spu2_ciph_mode,
+                            spu2_auth_type, spu2_auth_mode);
+
+       spu2_fmd_ctrl1_write(fmd, is_inbound, aad_size, 0,
+                            ksize, false, false, SPU2_VAL_NONE,
+                            SPU2_VAL_NONE, SPU2_VAL_NONE, iv_size,
+                            digest_size, false, SPU2_VAL_NONE);
+
+       spu2_fmd_ctrl2_write(fmd, aad_size, 0, 0,
+                            ksize, iv_size);
+
+       spu2_fmd_ctrl3_write(fmd, payload_len);
+
+       /* Source metadata and data pointers */
+       sreq->msgs.srcs_addr[src_index] = sreq->fptr;
+       sreq->msgs.srcs_len[src_index] = sizeof(struct spu2_fmd);
+       src_index++;
+
+       if (key != NULL && fsattr_sz(key) != 0) {
+               memcpy(sreq->cipher_key,
+                      fsattr_va(key), fsattr_sz(key));
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       BCMFS_DP_HEXDUMP_LOG(DEBUG, "cipher key:", fsattr_va(key),
+                            fsattr_sz(key));
+#endif
+               sreq->msgs.srcs_addr[src_index] = sreq->cptr;
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(key);
+               src_index++;
+       }
+
+       if (iv != NULL && fsattr_sz(iv) != 0) {
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+               BCMFS_DP_HEXDUMP_LOG(DEBUG, "iv key:", fsattr_va(iv),
+                                    fsattr_sz(iv));
+#endif
+               sreq->msgs.srcs_addr[src_index] = sreq->iptr;
+               sreq->msgs.srcs_len[src_index] = iv_size;
+               src_index++;
+       }
+
+       if (aad != NULL && fsattr_sz(aad) != 0) {
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+               BCMFS_DP_HEXDUMP_LOG(DEBUG, "aad :", fsattr_va(aad),
+                                    fsattr_sz(aad));
+#endif
+               sreq->msgs.srcs_addr[src_index] = fsattr_pa(aad);
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(aad);
+               src_index++;
+       }
+
+       sreq->msgs.srcs_addr[src_index] = fsattr_pa(src);
+       sreq->msgs.srcs_len[src_index] = fsattr_sz(src);
+       src_index++;
+
+       if (aeop == RTE_CRYPTO_AEAD_OP_DECRYPT && digest != NULL &&
+           fsattr_sz(digest) != 0) {
+               sreq->msgs.srcs_addr[src_index] = fsattr_pa(digest);
+               sreq->msgs.srcs_len[src_index] = fsattr_sz(digest);
+               src_index++;
+       }
+       sreq->msgs.srcs_count = src_index;
+
+       if (dst != NULL) {
+               sreq->msgs.dsts_addr[dst_index] = fsattr_pa(dst);
+               sreq->msgs.dsts_len[dst_index] = fsattr_sz(dst);
+               dst_index++;
+       }
+
+       if (aeop == RTE_CRYPTO_AEAD_OP_DECRYPT) {
+               /*
+                * For decryption the SPU2 engine still generates digest
+                * data, but the application does not need it. So
+                * program a dummy location to capture the digest
+                * data.
+                */
+               if (digest != NULL && fsattr_sz(digest) != 0) {
+                       sreq->msgs.dsts_addr[dst_index] =
+                               sreq->dptr;
+                       sreq->msgs.dsts_len[dst_index] =
+                               fsattr_sz(digest);
+                       dst_index++;
+               }
+       } else {
+               if (digest != NULL && fsattr_sz(digest) != 0) {
+                       sreq->msgs.dsts_addr[dst_index] =
+                               fsattr_pa(digest);
+                       sreq->msgs.dsts_len[dst_index] =
+                               fsattr_sz(digest);
+                       dst_index++;
+               }
+       }
+
+       sreq->msgs.dsts_addr[dst_index] = sreq->rptr;
+       sreq->msgs.dsts_len[dst_index] = SPU2_STATUS_LEN;
+       dst_index++;
+       sreq->msgs.dsts_count = dst_index;
+
+       return 0;
+}
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_engine.h b/drivers/crypto/bcmfs/bcmfs_sym_engine.h
new file mode 100644 (file)
index 0000000..d959424
--- /dev/null
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BCMFS_SYM_ENGINE_H_
+#define _BCMFS_SYM_ENGINE_H_
+
+#include <rte_crypto_sym.h>
+
+#include "bcmfs_dev_msg.h"
+#include "bcmfs_sym_defs.h"
+#include "bcmfs_sym_req.h"
+
+/* structure to hold element's attributes */
+struct fsattr {
+       void *va;
+       uint64_t pa;
+       uint64_t sz;
+};
+
+#define fsattr_va(__ptr)      ((__ptr)->va)
+#define fsattr_pa(__ptr)      ((__ptr)->pa)
+#define fsattr_sz(__ptr)      ((__ptr)->sz)
+
+/*
+ *  Macros for Crypto h/w constraints
+ */
+
+#define BCMFS_CRYPTO_AES_BLOCK_SIZE    16
+#define BCMFS_CRYPTO_AES_MIN_KEY_SIZE  16
+#define BCMFS_CRYPTO_AES_MAX_KEY_SIZE  32
+
+#define BCMFS_CRYPTO_DES_BLOCK_SIZE    8
+#define BCMFS_CRYPTO_DES_KEY_SIZE      8
+
+#define BCMFS_CRYPTO_3DES_BLOCK_SIZE   8
+#define BCMFS_CRYPTO_3DES_KEY_SIZE     (3 * 8)
+
+#define BCMFS_CRYPTO_MD5_DIGEST_SIZE   16
+#define BCMFS_CRYPTO_MD5_BLOCK_SIZE    64
+
+#define BCMFS_CRYPTO_SHA1_DIGEST_SIZE  20
+#define BCMFS_CRYPTO_SHA1_BLOCK_SIZE   64
+
+#define BCMFS_CRYPTO_SHA224_DIGEST_SIZE        28
+#define BCMFS_CRYPTO_SHA224_BLOCK_SIZE 64
+
+#define BCMFS_CRYPTO_SHA256_DIGEST_SIZE        32
+#define BCMFS_CRYPTO_SHA256_BLOCK_SIZE 64
+
+#define BCMFS_CRYPTO_SHA384_DIGEST_SIZE        48
+#define BCMFS_CRYPTO_SHA384_BLOCK_SIZE 128
+
+#define BCMFS_CRYPTO_SHA512_DIGEST_SIZE        64
+#define BCMFS_CRYPTO_SHA512_BLOCK_SIZE 128
+
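+/* SHA3 block size = Keccak-1600 rate in bytes, i.e. 200 - 2 * digest size */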
+#define BCMFS_CRYPTO_SHA3_224_DIGEST_SIZE      (224 / 8)
+#define BCMFS_CRYPTO_SHA3_224_BLOCK_SIZE       (200 - 2 * \
+                                       BCMFS_CRYPTO_SHA3_224_DIGEST_SIZE)
+
+#define BCMFS_CRYPTO_SHA3_256_DIGEST_SIZE      (256 / 8)
+#define BCMFS_CRYPTO_SHA3_256_BLOCK_SIZE       (200 - 2 * \
+                                       BCMFS_CRYPTO_SHA3_256_DIGEST_SIZE)
+
+#define BCMFS_CRYPTO_SHA3_384_DIGEST_SIZE      (384 / 8)
+#define BCMFS_CRYPTO_SHA3_384_BLOCK_SIZE       (200 - 2 * \
+                                       BCMFS_CRYPTO_SHA3_384_DIGEST_SIZE)
+
+#define BCMFS_CRYPTO_SHA3_512_DIGEST_SIZE      (512 / 8)
+#define BCMFS_CRYPTO_SHA3_512_BLOCK_SIZE       (200 - 2 * \
+                                       BCMFS_CRYPTO_SHA3_512_DIGEST_SIZE)
+
+enum bcmfs_crypto_aes_cipher_key {
+       BCMFS_CRYPTO_AES128 = 16,
+       BCMFS_CRYPTO_AES192 = 24,
+       BCMFS_CRYPTO_AES256 = 32,
+};
+
+int
+bcmfs_crypto_build_cipher_req(struct bcmfs_sym_request *req,
+                             enum rte_crypto_cipher_algorithm c_algo,
+                             enum rte_crypto_cipher_operation cop,
+                             struct fsattr *src, struct fsattr *dst,
+                             struct fsattr *key, struct fsattr *iv);
+
+int
+bcmfs_crypto_build_auth_req(struct bcmfs_sym_request *req,
+                           enum rte_crypto_auth_algorithm a_algo,
+                           enum rte_crypto_auth_operation aop,
+                           struct fsattr *src, struct fsattr *dst,
+                           struct fsattr *mac, struct fsattr *key,
+                           struct fsattr *iv);
+
+int
+bcmfs_crypto_build_chain_request(struct bcmfs_sym_request *req,
+                                enum rte_crypto_cipher_algorithm c_algo,
+                                enum rte_crypto_cipher_operation cop,
+                                enum rte_crypto_auth_algorithm a_algo,
+                                enum rte_crypto_auth_operation aop,
+                                struct fsattr *src, struct fsattr *dst,
+                                struct fsattr *cipher_key,
+                                struct fsattr *auth_key,
+                                struct fsattr *iv, struct fsattr *aad,
+                                struct fsattr *digest, bool cipher_first);
+
+int
+bcmfs_crypto_build_aead_request(struct bcmfs_sym_request *req,
+                               enum rte_crypto_aead_algorithm ae_algo,
+                               enum rte_crypto_aead_operation aeop,
+                               struct fsattr *src, struct fsattr *dst,
+                               struct fsattr *key, struct fsattr *iv,
+                               struct fsattr *aad, struct fsattr *digest);
+
+#endif /* _BCMFS_SYM_ENGINE_H_ */
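
As a usage illustration of the prototypes above, a cipher-only request is
built by handing pre-filled fsattr descriptors to bcmfs_crypto_build_cipher_req().
This is a minimal sketch, not code from this patch: the wrapper name is
hypothetical and the algorithm/operation values are arbitrary examples.

#include "bcmfs_sym_engine.h"

/* Hypothetical wrapper: build an AES-CBC encrypt request from descriptors
 * whose va/pa/sz fields have already been filled by the caller.
 */
static int
build_cipher_example(struct bcmfs_sym_request *sreq,
                     struct fsattr *src, struct fsattr *dst,
                     struct fsattr *key, struct fsattr *iv)
{
        return bcmfs_crypto_build_cipher_req(sreq,
                                             RTE_CRYPTO_CIPHER_AES_CBC,
                                             RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                                             src, dst, key, iv);
}
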
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_pmd.c b/drivers/crypto/bcmfs/bcmfs_sym_pmd.c
index 381ca8e..568797b 100644 (file)
@@ -132,6 +132,12 @@ static void
-spu_req_init(struct bcmfs_sym_request *sr, rte_iova_t iova __rte_unused)
+spu_req_init(struct bcmfs_sym_request *sr, rte_iova_t iova)
 {
        memset(sr, 0, sizeof(*sr));
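+       /* member IOVAs are offsets from the request's base IOVA (offsetof) */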
+       sr->fptr = iova;
+       sr->cptr = iova + offsetof(struct bcmfs_sym_request, cipher_key);
+       sr->aptr = iova + offsetof(struct bcmfs_sym_request, auth_key);
+       sr->iptr = iova + offsetof(struct bcmfs_sym_request, iv);
+       sr->dptr = iova + offsetof(struct bcmfs_sym_request, digest);
+       sr->rptr = iova + offsetof(struct bcmfs_sym_request, resp);
 }
 
 static void
@@ -244,6 +250,7 @@ bcmfs_sym_pmd_enqueue_op_burst(void *queue_pair,
                               uint16_t nb_ops)
 {
        int i, j;
+       int retval;
        uint16_t enq = 0;
        struct bcmfs_sym_request *sreq;
        struct bcmfs_sym_session *sess;
@@ -273,6 +280,11 @@ bcmfs_sym_pmd_enqueue_op_burst(void *queue_pair,
                /* save context */
                qp->infl_msgs[i] = &sreq->msgs;
                qp->infl_msgs[i]->ctx = (void *)sreq;
+
+               /* pre-process the request for crypto h/w acceleration */
+               retval = bcmfs_process_sym_crypto_op(ops[i], sess, sreq);
+               if (unlikely(retval < 0))
+                       goto enqueue_err;
        }
        /* Send burst request to hw QP */
        enq = bcmfs_enqueue_op_burst(qp, (void **)qp->infl_msgs, i);
@@ -289,6 +301,17 @@ enqueue_err:
        return enq;
 }
 
+static void
+bcmfs_sym_set_request_status(struct rte_crypto_op *op,
+                            struct bcmfs_sym_request *out)
+{
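+       /* out->resp is the 2-byte status the h/w wrote at rptr */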
+       if (*out->resp == BCMFS_SYM_RESPONSE_SUCCESS)
+               op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+       else if (*out->resp == BCMFS_SYM_RESPONSE_HASH_TAG_ERROR)
+               op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+       else
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
 static uint16_t
 bcmfs_sym_pmd_dequeue_op_burst(void *queue_pair,
                               struct rte_crypto_op **ops,
@@ -308,6 +331,9 @@ bcmfs_sym_pmd_dequeue_op_burst(void *queue_pair,
        for (i = 0; i < deq; i++) {
                sreq = (struct bcmfs_sym_request *)qp->infl_msgs[i]->ctx;
 
+               /* set the status based on the response from the crypto h/w */
+               bcmfs_sym_set_request_status(sreq->op, sreq);
+
                ops[pkts++] = sreq->op;
 
                rte_mempool_put(qp->sr_mp, sreq);
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_req.h b/drivers/crypto/bcmfs/bcmfs_sym_req.h
index 0f0b051..e53c50a 100644 (file)
@@ -6,13 +6,53 @@
 #ifndef _BCMFS_SYM_REQ_H_
 #define _BCMFS_SYM_REQ_H_
 
+#include <rte_cryptodev.h>
+
 #include "bcmfs_dev_msg.h"
+#include "bcmfs_sym_defs.h"
+
+/* Fixed SPU2 Metadata */
+struct spu2_fmd {
+       uint64_t ctrl0;
+       uint64_t ctrl1;
+       uint64_t ctrl2;
+       uint64_t ctrl3;
+};
 
 /*
  * This structure holds the supporting data required to process a
  * rte_crypto_op
  */
 struct bcmfs_sym_request {
+       /* spu2 engine related data */
+       struct spu2_fmd fmd;
+       /* cipher key */
+       uint8_t cipher_key[BCMFS_MAX_KEY_SIZE];
+       /* auth key */
+       uint8_t auth_key[BCMFS_MAX_KEY_SIZE];
+       /* initialization vector (IV) */
+       uint8_t iv[BCMFS_MAX_IV_SIZE];
+       /* digest data output from crypto h/w */
+       uint8_t digest[BCMFS_MAX_DIGEST_SIZE];
+       /* 2-byte response from crypto h/w */
+       uint8_t resp[2];
+       /*
+        * IOVAs for the members above, in the same order
+        */
+       /* iova for fmd */
+       rte_iova_t fptr;
+       /* iova for cipher key */
+       rte_iova_t cptr;
+       /* iova for auth key */
+       rte_iova_t aptr;
+       /* iova for IV */
+       rte_iova_t iptr;
+       /* iova for digest */
+       rte_iova_t dptr;
+       /* iova for response */
+       rte_iova_t rptr;
+
        /* bcmfs qp message for h/w queues to process */
        struct bcmfs_qp_message msgs;
        /* crypto op */
diff --git a/drivers/crypto/bcmfs/meson.build b/drivers/crypto/bcmfs/meson.build
index 2e86c73..7aa0f05 100644 (file)
@@ -14,5 +14,7 @@ sources = files(
                'hw/bcmfs_rm_common.c',
                'bcmfs_sym_pmd.c',
                'bcmfs_sym_capabilities.c',
-               'bcmfs_sym_session.c'
+               'bcmfs_sym_session.c',
+               'bcmfs_sym.c',
+               'bcmfs_sym_engine.c'
                )