+ struct dpaa2_sec_qp *qp =
+ (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qp == NULL)
+ return 0;
+
+ if (qp->rx_vq.q_storage) {
+ dpaa2_free_dq_storage(qp->rx_vq.q_storage);
+ rte_free(qp->rx_vq.q_storage);
+ }
+ rte_free(qp);
+
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct dpaa2_sec_qp *qp;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* If the qp is already set up, reuse it and return success. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ PMD_DRV_LOG(INFO, "QP already setup");
+ return 0;
+ }
+
+ PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ dev, qp_id, qp_conf);
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+
+ qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp) {
+ RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+ return -1;
+ }
+
+ qp->rx_vq.dev = dev;
+ qp->tx_vq.dev = dev;
+ qp->rx_vq.q_storage = rte_malloc("sec dq storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp->rx_vq.q_storage) {
+ RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+ rte_free(qp);
+ return -1;
+ }
+ memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
+
+ if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
+ RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+ rte_free(qp->rx_vq.q_storage);
+ rte_free(qp);
+ return -1;
+ }
+
+ dev->data->queue_pairs[qp_id] = qp;
+
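+ /*
+ * Ask the DPSECI to return a pointer to this qp's rx_vq as the user
+ * context with each dequeued frame, so the response path can recover
+ * the owning queue pair without a lookup.
+ */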
+ cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+ retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ return retcode;
+}
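+
+/*
+ * Illustrative only: applications reach the setup handler above through
+ * the generic cryptodev API; names are standard DPDK, values are
+ * placeholders:
+ *
+ * struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
+ * rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, SOCKET_ID_ANY,
+ * session_pool);
+ */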
+
+/** Start queue pair */
+static int
+dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Stop queue pair */
+static int
+dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the dpaa2_sec session structure */
+static unsigned int
+dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa2_sec_session);
+}
+
+static int
+dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC CIPHER only one descriptor is required. */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ /* Set IV parameters */
+ session->iv.offset = xform->cipher.iv.offset;
+ session->iv.length = xform->cipher.iv.length;
+
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ case RTE_CRYPTO_CIPHER_NULL:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ xform->cipher.algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ xform->cipher.algo);
+ goto error_out;
+ }
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
+ &cipherdata, NULL, session->iv.length,
+ session->dir);
+ if (bufsize < 0) {
+ RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ goto error_out;
+ }
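+ /*
+ * Populate the shared flow context: word1 holds the shared-descriptor
+ * length, and words 2/3 carry the lower/upper halves of the rx_vq
+ * pointer of qp 0 so completions are steered back to it. The
+ * dhr/bpv0/mode_bits values follow the SEC flow-context layout.
+ */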
+ flc->dhr = 0;
+ flc->bpv0 = 0x1;
+ flc->mode_bits = 0x8000;
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_auth_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC AUTH three descriptors are required for various stages */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + 3 *
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = xform->auth.key.length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ session->digest_length = xform->auth.digest_length;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ xform->auth.algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ xform->auth.algo);
+ goto error_out;
+ }
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
+ 1, 0, &authdata, !session->dir,
+ session->digest_length);
+ if (bufsize < 0) {
+ RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_aead_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo aeaddata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set IV parameters */
+ session->iv.offset = aead_xform->iv.offset;
+ session->iv.length = aead_xform->iv.length;
+ session->ctxt_type = DPAA2_SEC_AEAD;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ rte_free(priv);
+ return -1;
+ }
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
+
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
+ ctxt->auth_only_len = aead_xform->aad_length;
+
+ aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.keylen = session->aead_key.length;
+ aeaddata.key_enc_flags = 0;
+ aeaddata.key_type = RTA_DATA_IMM;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata.algtype = OP_ALG_ALGSEL_AES;
+ aeaddata.algmode = OP_ALG_AAI_GCM;
+ session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
+ aead_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ goto error_out;
+ }
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
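+ /*
+ * Query whether the key can be inlined in the shared descriptor.
+ * rta_inline_query() takes the key length staged in desc[0] and sets
+ * bit 0 of the result word (desc[1]) when the key fits inline;
+ * otherwise the descriptor must reference the key by pointer.
+ */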
+ priv->flc_desc[0].desc[0] = aeaddata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[1], 1);
+
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[1] & 1) {
+ aeaddata.key_type = RTA_DATA_IMM;
+ } else {
+ aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+ aeaddata.key_type = RTA_DATA_PTR;
+ }
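+ /* desc[0]/desc[1] were only scratch space for the inline query;
+ * clear them before the real descriptor is built in this buffer.
+ */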
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_gcm_encap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ else
+ bufsize = cnstr_shdsc_gcm_decap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ if (bufsize < 0) {
+ RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ goto error_out;
+ }
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->aead_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata, cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session->ext_params.aead_ctxt.auth_cipher_text) {
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
+ } else {
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
+ }
+
+ /* Set IV parameters */
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ session->digest_length = auth_xform->digest_length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto error_out;
+ }
+ session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
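+ /*
+ * As in the AEAD case, query which of the two keys (cipher key length
+ * staged in desc[0], auth key length in desc[1]) can be inlined in
+ * the shared descriptor; the per-key result bitmask lands in desc[2].
+ */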
+ priv->flc_desc[0].desc[0] = cipherdata.keylen;
+ priv->flc_desc[0].desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[2], 2);
+
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[2] & 1) {
+ cipherdata.key_type = RTA_DATA_IMM;
+ } else {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (priv->flc_desc[0].desc[2] & (1 << 1)) {
+ authdata.key_type = RTA_DATA_IMM;
+ } else {
+ authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
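+ /* desc[0..2] served as scratch for the inline query; clear them
+ * before the real descriptor is built in this buffer.
+ */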
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+ priv->flc_desc[0].desc[2] = 0;
+
+ if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
+ bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
+ 0, &cipherdata, &authdata,
+ session->iv.length,
+ ctxt->auth_only_len,
+ session->digest_length,
+ session->dir);
+ } else {
+ RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
+ goto error_out;
+ }
+
+ if (bufsize < 0) {
+ RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ goto error_out;
+ }
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ dpaa2_sec_session *session = sess;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ RTE_LOG(ERR, PMD, "invalid session struct\n");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_CIPHER;
+ ret = dpaa2_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_AUTH;
+ ret = dpaa2_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ ret = dpaa2_sec_aead_init(dev, xform, session);
+
+ } else {
+ RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
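+
+/*
+ * Illustrative only: the "cipher then authenticate" branch above is taken
+ * when an application chains two symmetric xforms in that order. Types
+ * and enum values below are standard DPDK; algorithm/key fields are
+ * omitted for brevity:
+ *
+ * struct rte_crypto_sym_xform auth = {
+ * .type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ * .next = NULL,
+ * };
+ * struct rte_crypto_sym_xform cipher = {
+ * .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ * .next = &auth,
+ * };
+ *
+ * Passing &cipher to session setup lands in the CIPHER_HASH case here.
+ */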
+
+static int
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa2_sec_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(dpaa2_sec_session));
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static int
+dpaa2_sec_dev_start(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_attr attr;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ struct dpseci_rx_queue_attr rx_attr;
+ struct dpseci_tx_queue_attr tx_attr;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&attr, 0, sizeof(struct dpseci_attr));
+
+ ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
+ priv->hw_id);
+ goto get_attr_failure;
+ }
+ ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+ goto get_attr_failure;
+ }
+ for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->rx_vq;
+ dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &rx_attr);
+ dpaa2_q->fqid = rx_attr.fqid;
+ PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+ }
+ for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->tx_vq;
+ dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &tx_attr);
+ dpaa2_q->fqid = tx_attr.fqid;
+ PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+ }
+
+ return 0;
+get_attr_failure:
+ dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ return -1;
+}
+
+static void
+dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+ priv->hw_id);
+ return;
+ }
+
+ ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
+ ret);
+ return;
+ }
+}
+
+static int
+dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* This function is the reverse of dpaa2_sec_dev_init.
+ * It does the following:
+ * 1. Detach the DPSECI from its attached resources, i.e. buffer
+ * pools and dpbp_id.
+ * 2. Close the DPSECI device.
+ * 3. Free the allocated resources.
+ */
+
+ /* Close the device at the underlying layer */
+ ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
+ " error code %d\n", ret);
+ return -1;
+ }
+
+ /* Free the allocated memory for crypto device private data and dpseci */
+ priv->hw = NULL;
+ rte_free(dpseci);
+
+ return 0;
+}
+
+static void
+dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa2_sec_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static
+void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_sec_counters counters = {0};
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
+ stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
+ stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
+ stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
+ }
+
+ ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
+ &counters);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
+ } else {
+ PMD_DRV_LOG(INFO, "dpseci hw stats:"
+ "\n\tNumber of Requests Dequeued = %" PRIu64
+ "\n\tNumber of Outbound Encrypt Requests = %" PRIu64
+ "\n\tNumber of Inbound Decrypt Requests = %" PRIu64
+ "\n\tNumber of Outbound Bytes Encrypted = %" PRIu64
+ "\n\tNumber of Outbound Bytes Protected = %" PRIu64
+ "\n\tNumber of Inbound Bytes Decrypted = %" PRIu64
+ "\n\tNumber of Inbound Bytes Validated = %" PRIu64,
+ counters.dequeued_requests,
+ counters.ob_enc_requests,
+ counters.ib_dec_requests,
+ counters.ob_enc_bytes,
+ counters.ob_prot_bytes,
+ counters.ib_dec_bytes,
+ counters.ib_valid_bytes);
+ }
+}
+
+static
+void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ (dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+ qp[i]->tx_vq.rx_pkts = 0;
+ qp[i]->tx_vq.tx_pkts = 0;
+ qp[i]->tx_vq.err_pkts = 0;
+ qp[i]->rx_vq.rx_pkts = 0;
+ qp[i]->rx_vq.tx_pkts = 0;
+ qp[i]->rx_vq.err_pkts = 0;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa2_sec_dev_configure,
+ .dev_start = dpaa2_sec_dev_start,
+ .dev_stop = dpaa2_sec_dev_stop,
+ .dev_close = dpaa2_sec_dev_close,
+ .dev_infos_get = dpaa2_sec_dev_infos_get,
+ .stats_get = dpaa2_sec_stats_get,
+ .stats_reset = dpaa2_sec_stats_reset,
+ .queue_pair_setup = dpaa2_sec_queue_pair_setup,
+ .queue_pair_release = dpaa2_sec_queue_pair_release,
+ .queue_pair_start = dpaa2_sec_queue_pair_start,
+ .queue_pair_stop = dpaa2_sec_queue_pair_stop,
+ .queue_pair_count = dpaa2_sec_queue_pair_count,
+ .session_get_size = dpaa2_sec_session_get_size,
+ .session_configure = dpaa2_sec_session_configure,
+ .session_clear = dpaa2_sec_session_clear,
+};
+
+static int
+dpaa2_sec_uninit(const struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->fle_pool);
+